diff --git a/.github/workflows/e2e-tests.yaml b/.github/workflows/e2e-tests.yaml
new file mode 100644
index 0000000..cc50ac4
--- /dev/null
+++ b/.github/workflows/e2e-tests.yaml
@@ -0,0 +1,422 @@
+name: E2E Tests
+
+permissions:
+ contents: read
+
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+ branches:
+ - main
+ paths-ignore:
+ - '**/*.md'
+ - 'docs/**'
+
+jobs:
+ build:
+ name: Build Application and UI
+ runs-on: ubuntu-latest
+
+ services:
+ mailhog:
+ image: mailhog/mailhog
+ ports:
+ - 1025:1025
+ - 8025:8025
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: '1.23.4'
+ cache: true
+
+ - name: Setup Bun
+ uses: oven-sh/setup-bun@v2
+
+ - name: Build UI
+ run: |
+ cd web
+ bun install
+ export API_URL="/api"
+ bun run build
+
+ - name: Cache Go modules
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/.cache/go-build
+ ~/go/pkg/mod
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go-
+ - name: Install dependencies
+ run: |
+ go mod download
+ sudo apt-get update
+ sudo apt-get install -y build-essential
+
+ - name: Cache built binary
+ id: cache-binary
+ uses: actions/cache@v4
+ with:
+ path: chainlaunch
+ key: ${{ runner.os }}-chainlaunch-bin-${{ hashFiles('**/*.go', '**/go.sum', '**/go.mod') }}
+
+ - name: Build the application
+ if: steps.cache-binary.outputs.cache-hit != 'true'
+ run: |
+ go build -v -o chainlaunch ./main.go
+ chmod +x chainlaunch
+
+ - name: Upload build artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: build-artifacts
+ path: |
+ chainlaunch
+ web/dist
+
+ testnet-besu:
+ name: Create Besu Testnet
+ runs-on: ubuntu-latest
+ needs: build
+ env:
+ CHAINLAUNCH_USER: admin
+ CHAINLAUNCH_PASSWORD: admin123
+ CHAINLAUNCH_DATA: ${{ github.workspace }}/test-data
+ CHAINLAUNCH_API_URL: http://localhost:8100/api/v1
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Download build artifacts
+ uses: actions/download-artifact@v4
+ with:
+ name: build-artifacts
+
+ - name: Start the application and verify API is available
+ run: |
+ echo "CHAINLAUNCH_DATA: $CHAINLAUNCH_DATA"
+ chmod +x chainlaunch
+ ./chainlaunch serve --data=$CHAINLAUNCH_DATA --port=8100 --db data.db &
+ # Wait for port 8100 to be available (60 seconds timeout)
+ timeout=60
+ while ! curl -s http://localhost:8100 > /dev/null; do
+ if [ $timeout -le 0 ]; then
+ echo "Timeout waiting for API to become available"
+ exit 1
+ fi
+ echo "Waiting for API to become available... ($timeout seconds remaining)"
+ sleep 1
+ timeout=$((timeout - 1))
+ done
+ - name: Create and verify Besu testnet
+ run: |
+ # Create a new Besu testnet
+ ./chainlaunch testnet besu --name mynet --nodes 4 --prefix besu-test --mode=docker
+ # Wait for port 8545 to be available (60 seconds timeout)
+ timeout=60
+ while ! curl -s http://localhost:8545 > /dev/null; do
+ if [ $timeout -le 0 ]; then
+ echo "Timeout waiting for Besu node to become available"
+ exit 1
+ fi
+ echo "Waiting for Besu node to become available... ($timeout seconds remaining)"
+ sleep 1
+ timeout=$((timeout - 1))
+ done
+
+ # Wait for nodes to start producing blocks (up to 90 seconds)
+ for i in {1..90}; do
+ # Make the curl request and capture both stdout and stderr
+ resp=$(curl -s -f -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' http://localhost:8545 2>&1); curl_status=$?
+ if [ $curl_status -ne 0 ]; then
+ case $curl_status in
+ 56)
+ echo "Connection refused to node (status: $curl_status). Retrying..."
+ ;;
+ 7)
+ echo "Failed to connect to host (status: $curl_status). Retrying..."
+ ;;
+ 28)
+ echo "Operation timed out (status: $curl_status). Retrying..."
+ ;;
+ 22)
+ echo "HTTP response code indicated error (status: $curl_status). Retrying..."
+ ;;
+ *)
+ echo "Curl failed with status $curl_status: $resp. Retrying..."
+ ;;
+ esac
+ sleep 1
+ continue
+ fi
+
+ # Check for empty response
+ if [ -z "$resp" ]; then
+ echo "Empty response received from node"
+ sleep 1
+ continue
+ fi
+
+ # Parse the response with error handling
+ if ! block_hex=$(echo "$resp" | jq -r .result 2>/dev/null); then
+ echo "Failed to parse JSON response: $resp"
+ sleep 1
+ continue
+ fi
+
+ # Check for JSON-RPC errors
+ if error=$(echo "$resp" | jq -r .error 2>/dev/null) && [ "$error" != "null" ]; then
+ echo "JSON-RPC error received: $error"
+ sleep 1
+ continue
+ fi
+
+ if [ "$block_hex" = "null" ] || [ -z "$block_hex" ]; then
+ echo "Invalid block number received"
+ sleep 1
+ continue
+ fi
+
+ # Validate hex format
+ if [[ ! "$block_hex" =~ ^0x[0-9a-fA-F]+$ ]]; then
+ echo "Invalid hex format received: $block_hex"
+ sleep 1
+ continue
+ fi
+
+ # Convert hex to decimal with error handling
+ if ! block_num=$((16#${block_hex:2})) 2>/dev/null; then
+ echo "Failed to convert block number from hex: $block_hex"
+ sleep 1
+ continue
+ fi
+
+ echo "Current block: $block_num"
+ if [ "$block_num" -ge 5 ]; then
+ echo "Besu node has reached block >= 5"
+ exit 0
+ fi
+ sleep 1
+ done
+
+ echo "Timeout waiting for blocks to be produced"
+ exit 1
+ - name: Show Besu containers and logs (always)
+ if: always()
+ run: |
+ echo "==== besu list nodes ===="
+ ./chainlaunch besu list
+ echo "==== docker ps ===="
+ docker ps -a || true
+ echo "==== docker logs (besu containers) ===="
+ for cid in $(docker ps -a --filter "name=besu-test" --format "{{.ID}}" ); do
+ echo "--- Logs for container $cid ---"
+ docker logs $cid || true
+ done
+
+ testnet-fabric:
+ name: Create Fabric Testnet
+ runs-on: ubuntu-latest
+ needs: build
+ env:
+ CHAINLAUNCH_USER: admin
+ CHAINLAUNCH_PASSWORD: admin123
+ CHAINLAUNCH_DATA: ${{ github.workspace }}/test-data
+ CHAINLAUNCH_API_URL: http://localhost:8100/api/v1
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Download build artifacts
+ uses: actions/download-artifact@v4
+ with:
+ name: build-artifacts
+ - name: Start the application and verify API is available
+ run: |
+ echo "CHAINLAUNCH_DATA: $CHAINLAUNCH_DATA"
+ chmod +x chainlaunch
+ ./chainlaunch serve --data=$CHAINLAUNCH_DATA --port=8100 --db data.db &
+ # Wait for port 8100 to be available (60 seconds timeout)
+ timeout=60
+ while ! curl -s http://localhost:8100 > /dev/null; do
+ if [ $timeout -le 0 ]; then
+ echo "Timeout waiting for API to become available"
+ exit 1
+ fi
+ echo "Waiting for API to become available... ($timeout seconds remaining)"
+ sleep 1
+ timeout=$((timeout - 1))
+ done
+
+ - name: Create Fabric testnet
+ run: |
+ ./chainlaunch testnet fabric --name mynet --org "Org1MSP123" --peerOrgs "Org1MSP123" --ordererOrgs "OrdererOrg123" --channels mychannel --peerCounts "Org1MSP123=2" --ordererCounts "OrdererOrg123=3" --mode=docker
+ - name: Test get a block from the channel
+ run: |
+ export NETWORK_ID=$(./chainlaunch networks fabric list --output=json | jq -r '.networks[0].id')
+ response=$(curl -s -w "%{http_code}" -X 'GET' \
+ "http://localhost:8100/api/v1/networks/fabric/$NETWORK_ID/blocks/0" \
+ -H 'accept: application/json' \
+ -u "$CHAINLAUNCH_USER:$CHAINLAUNCH_PASSWORD")
+ status_code=${response: -3}
+ response_body=${response:0:-3}
+ if [ "$status_code" -ne 200 ]; then
+ echo "Error: Expected status code 200, got $status_code"
+ echo "Response body: $response_body"
+ exit 1
+ fi
+ echo "Got a block from the channel with status code 200"
+ - name: Show Fabric containers and logs (debug)
+ if: always()
+ run: |
+ echo "==== list fabric networks ===="
+ ./chainlaunch networks fabric list --output=json
+ echo "==== list fabric peers ===="
+ ./chainlaunch fabric peer list --output=json
+ echo "==== list fabric orderers ===="
+ ./chainlaunch fabric orderer list --output=json
+ echo "==== docker ps ===="
+ docker ps -a || true
+ echo "==== docker logs (fabric containers) ===="
+ for cid in $(docker ps -a --filter "name=fabric" --format "{{.ID}}" ); do
+ echo "--- Logs for container $cid ---"
+ docker logs $cid || true
+ done
+
+ api-e2e:
+ name: Run API E2E Tests
+ runs-on: ubuntu-latest
+ needs: [build]
+
+ services:
+ mailhog:
+ image: mailhog/mailhog
+ ports:
+ - 1025:1025
+ - 8025:8025
+
+ env:
+ API_BASE_URL: http://localhost:8100/api/v1
+ CHAINLAUNCH_USER: admin
+ CHAINLAUNCH_PASSWORD: admin123
+ API_USERNAME: admin
+ API_PASSWORD: admin123
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Download build artifacts
+ uses: actions/download-artifact@v4
+ with:
+ name: build-artifacts
+
+ - name: Start the application and run API tests
+ run: |
+ export CHAINLAUNCH_USER=admin
+ export CHAINLAUNCH_PASSWORD=admin123
+ export CHAINLAUNCH_DATA=${{ github.workspace }}/test-data
+ echo "CHAINLAUNCH_DATA: $CHAINLAUNCH_DATA"
+ chmod +x chainlaunch
+ ./chainlaunch serve --data=$CHAINLAUNCH_DATA --port=8100 --db data.db &
+ # Wait for port 8100 to be available (60 seconds timeout)
+ timeout=60
+ while ! curl -s http://localhost:8100 > /dev/null; do
+ if [ $timeout -le 0 ]; then
+ echo "Timeout waiting for API to become available"
+ exit 1
+ fi
+ echo "Waiting for API to become available... ($timeout seconds remaining)"
+ sleep 1
+ timeout=$((timeout - 1))
+ done
+
+ - name: Upload API test results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: api-test-results
+ path: test-results.xml
+
+ ui-e2e:
+ name: Run UI E2E Tests
+ runs-on: ubuntu-latest
+ needs: build
+
+ services:
+ mailhog:
+ image: mailhog/mailhog
+ ports:
+ - 1025:1025
+ - 8025:8025
+
+ env:
+ CYPRESS_BASE_URL: http://localhost:8100
+ CYPRESS_API_URL: http://localhost:8100/api/v1
+ PLAYWRIGHT_USER: admin
+ PLAYWRIGHT_PASSWORD: admin123
+ CHAINLAUNCH_USER: admin
+ CHAINLAUNCH_PASSWORD: admin123
+ PLAYWRIGHT_BASE_URL: http://localhost:8100
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Download build artifacts
+ uses: actions/download-artifact@v4
+ with:
+ name: build-artifacts
+ - name: Cache Playwright Browsers
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/ms-playwright
+ key: playwright-browsers-${{ runner.os }}-${{ hashFiles('web/package.json', 'web/bun.lockb') }}
+ restore-keys: |
+ playwright-browsers-${{ runner.os }}-
+
+ - name: Setup Bun
+ uses: oven-sh/setup-bun@v2
+ - name: Build UI
+ run: |
+ cd web
+ bun install
+
+ - name: Install Playwright Browsers
+ run: |
+ cd web
+ bunx playwright install --with-deps
+
+ - name: Start server and run UI tests
+ run: |
+ export CHAINLAUNCH_DATA=${{ github.workspace }}/test-data
+ chmod +x chainlaunch
+ ./chainlaunch serve --data=$CHAINLAUNCH_DATA --port=8100 --db data.db &
+
+ # Wait for port 8100 to be available (60 seconds timeout)
+ timeout=60
+ while ! curl -s http://localhost:8100 > /dev/null; do
+ if [ $timeout -le 0 ]; then
+ echo "Timeout waiting for API to become available"
+ exit 1
+ fi
+ echo "Waiting for API to become available... ($timeout seconds remaining)"
+ sleep 1
+ timeout=$((timeout - 1))
+ done
+
+ cd web
+ bun run test:e2e
+
+ - name: Upload UI test results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: ui-test-results
+ path: |
+ web/test-results
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index bedd360..14477ac 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -31,6 +31,7 @@ jobs:
bun install
export API_URL="/api"
bun run build
+
- name: Upload chainlaunch-ui artifact
uses: actions/upload-artifact@v4
with:
diff --git a/.gitignore b/.gitignore
index 51749ba..3942772 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,4 +10,5 @@ certs
.env
test-instance
*.db
-decode_extradata
\ No newline at end of file
+decode_extradata
+.artifacts
\ No newline at end of file
diff --git a/.vscode/launch.json b/.vscode/launch.json
index 5da6b16..abc3d2d 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -18,7 +18,7 @@
"env": {
"CHAINLAUNCH_USER": "admin",
"CHAINLAUNCH_PASSWORD": "admin123",
- "JAVA_HOME": "/opt/homebrew/opt/openjdk@21"
+ "JAVA_HOME": "/opt/homebrew/Cellar/openjdk/23.0.2"
}
},
{
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..eb2424e
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "go.buildTags": "e2e"
+}
\ No newline at end of file
diff --git a/cmd/besu/node/create.go b/cmd/besu/node/create.go
index 857f783..ee5de36 100644
--- a/cmd/besu/node/create.go
+++ b/cmd/besu/node/create.go
@@ -23,6 +23,7 @@ type createCmd struct {
keyID int64
bootNodes []string
networkID int64
+ version string
logger *logger.Logger
}
@@ -116,6 +117,7 @@ func (c *createCmd) run(out *os.File) error {
Env: envVars,
KeyID: c.keyID,
BootNodes: c.bootNodes,
+ Version: c.version,
}
// Create node
@@ -165,6 +167,7 @@ func NewCreateCmd(logger *logger.Logger) *cobra.Command {
flags.StringSliceVar(&c.bootNodes, "boot-nodes", []string{}, "Boot nodes")
flags.Int64Var(&c.networkID, "network-id", 0, "Network ID")
flags.StringVar(&c.name, "name", "", "Name")
+ flags.StringVar(&c.version, "version", "25.5.0", "Version")
// Mark all flags as required
cmd.MarkFlagRequired("p2p-port")
cmd.MarkFlagRequired("rpc-port")
diff --git a/cmd/besu/node/list.go b/cmd/besu/node/list.go
index 2b450e4..b3bd844 100644
--- a/cmd/besu/node/list.go
+++ b/cmd/besu/node/list.go
@@ -1,6 +1,7 @@
package node
import (
+ "encoding/json"
"fmt"
"os"
"text/tabwriter"
@@ -13,6 +14,7 @@ import (
type listCmd struct {
page int
limit int
+ output string // "tsv" or "json"
logger *logger.Logger
}
@@ -27,31 +29,44 @@ func (c *listCmd) run(out *os.File) error {
return fmt.Errorf("failed to list Besu nodes: %w", err)
}
- // Create tab writer
- w := tabwriter.NewWriter(out, 0, 0, 3, ' ', 0)
- fmt.Fprintln(w, "ID\tName\tType\tStatus\tEndpoint")
- fmt.Fprintln(w, "--\t----\t----\t------\t--------")
+ switch c.output {
+ case "json":
+ enc := json.NewEncoder(out)
+ enc.SetIndent("", " ")
+ if err := enc.Encode(nodes.Items); err != nil {
+ return fmt.Errorf("failed to encode nodes as JSON: %w", err)
+ }
+ return nil
+ case "tsv":
+ // Create tab writer
+ w := tabwriter.NewWriter(out, 0, 0, 3, ' ', 0)
+ fmt.Fprintln(w, "ID\tName\tType\tStatus\tRPC\tMetrics\tP2P")
+ fmt.Fprintln(w, "--\t----\t----\t------\t----\t----\t----")
- // Print nodes
- for _, node := range nodes.Items {
- fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\n",
- node.ID,
- node.Name,
- node.NodeType,
- node.Status,
- node.Endpoint,
- )
- }
+ // Print nodes
+ for _, node := range nodes.Items {
+ fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n",
+ node.ID,
+ node.Name,
+ node.NodeType,
+ node.Status,
+ fmt.Sprintf("%s:%d", node.BesuNode.RPCHost, node.BesuNode.RPCPort),
+ fmt.Sprintf("%s:%d", node.BesuNode.MetricsHost, node.BesuNode.MetricsPort),
+ fmt.Sprintf("%s:%d", node.BesuNode.P2PHost, node.BesuNode.P2PPort),
+ )
+ }
- w.Flush()
+ w.Flush()
- // Print pagination info
- fmt.Printf("\nPage %d of %d (Total: %d)\n", nodes.Page, nodes.PageCount, nodes.Total)
- if nodes.HasNextPage {
- fmt.Println("Use --page to view more results")
+ // Print pagination info
+ fmt.Printf("\nPage %d of %d (Total: %d)\n", nodes.Page, nodes.PageCount, nodes.Total)
+ if nodes.HasNextPage {
+ fmt.Println("Use --page to view more results")
+ }
+ return nil
+ default:
+ return fmt.Errorf("unsupported output type: %s (must be 'tsv' or 'json')", c.output)
}
-
- return nil
}
// NewListCmd returns the list Besu nodes command
@@ -59,6 +74,7 @@ func NewListCmd(logger *logger.Logger) *cobra.Command {
c := &listCmd{
page: 1,
limit: 10,
+ output: "tsv",
logger: logger,
}
@@ -74,6 +90,7 @@ func NewListCmd(logger *logger.Logger) *cobra.Command {
flags := cmd.Flags()
flags.IntVar(&c.page, "page", 1, "Page number")
flags.IntVar(&c.limit, "limit", 10, "Number of items per page")
+ flags.StringVar(&c.output, "output", "tsv", "Output type: tsv or json")
return cmd
}
diff --git a/cmd/common/client.go b/cmd/common/client.go
index 6646dc8..0acf7fc 100644
--- a/cmd/common/client.go
+++ b/cmd/common/client.go
@@ -27,9 +27,9 @@ func NewClientFromEnv() (*Client, error) {
apiURL = defaultAPIURL
}
- username := os.Getenv("CHAINLAUNCH_USERNAME")
+ username := os.Getenv("CHAINLAUNCH_USER")
if username == "" {
- return nil, fmt.Errorf("CHAINLAUNCH_USERNAME environment variable is not set")
+ return nil, fmt.Errorf("CHAINLAUNCH_USER environment variable is not set")
}
password := os.Getenv("CHAINLAUNCH_PASSWORD")
diff --git a/cmd/common/keymanagement.go b/cmd/common/keymanagement.go
new file mode 100644
index 0000000..dd5ff52
--- /dev/null
+++ b/cmd/common/keymanagement.go
@@ -0,0 +1,48 @@
+package common
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/chainlaunch/chainlaunch/pkg/keymanagement/models"
+)
+
+// CreateKey creates a new cryptographic key using the API and returns the KeyResponse.
+func (c *Client) CreateKey(req *models.CreateKeyRequest) (*models.KeyResponse, error) {
+ resp, err := c.Post("/keys", req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create key: %w", err)
+ }
+ if err := CheckResponse(resp, 201); err != nil {
+ return nil, err
+ }
+ var keyResp models.KeyResponse
+ body, err := ReadBody(resp)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response: %w", err)
+ }
+ if err := json.Unmarshal(body, &keyResp); err != nil {
+ return nil, fmt.Errorf("failed to parse response: %w", err)
+ }
+ return &keyResp, nil
+}
+
+// GetKey retrieves a cryptographic key by ID using the API and returns the KeyResponse.
+func (c *Client) GetKey(keyID string) (*models.KeyResponse, error) {
+ resp, err := c.Get(fmt.Sprintf("/keys/%s", keyID))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get key: %w", err)
+ }
+ if err := CheckResponse(resp, 200); err != nil {
+ return nil, err
+ }
+ var keyResp models.KeyResponse
+ body, err := ReadBody(resp)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response: %w", err)
+ }
+ if err := json.Unmarshal(body, &keyResp); err != nil {
+ return nil, fmt.Errorf("failed to parse response: %w", err)
+ }
+ return &keyResp, nil
+}
diff --git a/cmd/common/networks.go b/cmd/common/networks.go
new file mode 100644
index 0000000..daa282f
--- /dev/null
+++ b/cmd/common/networks.go
@@ -0,0 +1,155 @@
+package common
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ httptypes "github.com/chainlaunch/chainlaunch/pkg/networks/http"
+)
+
+// CreateFabricNetwork creates a new Fabric network using the REST API
+func (c *Client) CreateFabricNetwork(req *httptypes.CreateFabricNetworkRequest) (*httptypes.NetworkResponse, error) {
+ resp, err := c.Post("/networks/fabric", req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create fabric network: %w", err)
+ }
+ defer resp.Body.Close()
+ if err := CheckResponse(resp, http.StatusCreated); err != nil {
+ return nil, err
+ }
+ var network httptypes.NetworkResponse
+ if err := json.NewDecoder(resp.Body).Decode(&network); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+ return &network, nil
+}
+
+// CreateBesuNetwork creates a new Besu network using the API and returns the BesuNetworkResponse.
+func (c *Client) CreateBesuNetwork(req *httptypes.CreateBesuNetworkRequest) (*httptypes.BesuNetworkResponse, error) {
+ resp, err := c.Post("/networks/besu", req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create besu network: %w", err)
+ }
+ if err := CheckResponse(resp, http.StatusOK, http.StatusCreated); err != nil {
+ return nil, err
+ }
+ var netResp httptypes.BesuNetworkResponse
+ body, err := ReadBody(resp)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response: %w", err)
+ }
+ if err := json.Unmarshal(body, &netResp); err != nil {
+ return nil, fmt.Errorf("failed to parse response: %w", err)
+ }
+ return &netResp, nil
+}
+
+// JoinPeerToFabricNetwork joins a peer to a Fabric network using the REST API
+func (c *Client) JoinPeerToFabricNetwork(networkID, peerID int64) (*httptypes.NetworkResponse, error) {
+ path := fmt.Sprintf("/networks/fabric/%d/peers/%d/join", networkID, peerID)
+ resp, err := c.Post(path, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to join peer %d to network %d: %w", peerID, networkID, err)
+ }
+ defer resp.Body.Close()
+ if err := CheckResponse(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+ var network httptypes.NetworkResponse
+ if err := json.NewDecoder(resp.Body).Decode(&network); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+ return &network, nil
+}
+
+// JoinAllPeersToFabricNetwork joins all peer nodes to a Fabric network
+func (c *Client) JoinAllPeersToFabricNetwork(networkID int64) ([]*httptypes.NetworkResponse, []error) {
+ peersResp, err := c.ListPeerNodes(1, 1000)
+ if err != nil {
+ return nil, []error{fmt.Errorf("failed to list peer nodes: %w", err)}
+ }
+ var results []*httptypes.NetworkResponse
+ var errs []error
+ for _, peer := range peersResp.Items {
+ resp, err := c.JoinPeerToFabricNetwork(networkID, peer.ID)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("peer %d: %w", peer.ID, err))
+ } else {
+ results = append(results, resp)
+ }
+ }
+ return results, errs
+}
+
+// JoinOrdererToFabricNetwork joins an orderer to a Fabric network using the REST API
+func (c *Client) JoinOrdererToFabricNetwork(networkID, ordererID int64) (*httptypes.NetworkResponse, error) {
+ path := fmt.Sprintf("/networks/fabric/%d/orderers/%d/join", networkID, ordererID)
+ resp, err := c.Post(path, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to join orderer %d to network %d: %w", ordererID, networkID, err)
+ }
+ defer resp.Body.Close()
+ if err := CheckResponse(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+ var network httptypes.NetworkResponse
+ if err := json.NewDecoder(resp.Body).Decode(&network); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+ return &network, nil
+}
+
+// JoinAllOrderersToFabricNetwork joins all orderer nodes to a Fabric network
+func (c *Client) JoinAllOrderersToFabricNetwork(networkID int64) ([]*httptypes.NetworkResponse, []error) {
+ orderersResp, err := c.ListOrdererNodes(1, 1000)
+ if err != nil {
+ return nil, []error{fmt.Errorf("failed to list orderer nodes: %w", err)}
+ }
+ var results []*httptypes.NetworkResponse
+ var errs []error
+ for _, orderer := range orderersResp.Items {
+ resp, err := c.JoinOrdererToFabricNetwork(networkID, orderer.ID)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("orderer %d: %w", orderer.ID, err))
+ } else {
+ results = append(results, resp)
+ }
+ }
+ return results, errs
+}
+
+// ListFabricNetworks lists all Fabric networks
+func (c *Client) ListFabricNetworks() (*httptypes.ListNetworksResponse, error) {
+ path := "/networks/fabric"
+ resp, err := c.Get(path)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if err := CheckResponse(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+ var result httptypes.ListNetworksResponse
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+func (c *Client) ListBesuNetworks() (*httptypes.ListNetworksResponse, error) {
+ path := "/networks/besu"
+ resp, err := c.Get(path)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if err := CheckResponse(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+ var result httptypes.ListNetworksResponse
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
diff --git a/cmd/common/nodes.go b/cmd/common/nodes.go
index 0034267..44ea398 100644
--- a/cmd/common/nodes.go
+++ b/cmd/common/nodes.go
@@ -5,30 +5,12 @@ import (
"fmt"
stdhttp "net/http"
+ httptypes "github.com/chainlaunch/chainlaunch/pkg/nodes/http"
"github.com/chainlaunch/chainlaunch/pkg/nodes/types"
)
-// PaginatedNodesResponse represents a paginated list of nodes
-type PaginatedNodesResponse struct {
- Items []NodeResponse `json:"items"`
- Total int64 `json:"total"`
- Page int `json:"page"`
- PageCount int `json:"pageCount"`
- HasNextPage bool `json:"hasNextPage"`
-}
-
-// NodeResponse represents a node response
-type NodeResponse struct {
- ID int64 `json:"id"`
- Name string `json:"name"`
- NodeType string `json:"nodeType"`
- Status string `json:"status"`
- Endpoint string `json:"endpoint"`
- ErrorMessage string `json:"errorMessage"`
-}
-
// CreatePeerNode creates a new Fabric peer node
-func (c *Client) CreatePeerNode(req *types.FabricPeerConfig) (*NodeResponse, error) {
+func (c *Client) CreatePeerNode(req *types.FabricPeerConfig) (*httptypes.NodeResponse, error) {
body := map[string]interface{}{
"name": req.Name,
"blockchainPlatform": "FABRIC",
@@ -44,7 +26,7 @@ func (c *Client) CreatePeerNode(req *types.FabricPeerConfig) (*NodeResponse, err
return nil, fmt.Errorf("failed to create peer node: %w", err)
}
- var node NodeResponse
+ var node httptypes.NodeResponse
if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
@@ -53,7 +35,7 @@ func (c *Client) CreatePeerNode(req *types.FabricPeerConfig) (*NodeResponse, err
}
// CreateOrdererNode creates a new Fabric orderer node
-func (c *Client) CreateOrdererNode(req *types.FabricOrdererConfig) (*NodeResponse, error) {
+func (c *Client) CreateOrdererNode(req *types.FabricOrdererConfig) (*httptypes.NodeResponse, error) {
body := map[string]interface{}{
"name": req.Name,
"blockchainPlatform": "FABRIC",
@@ -69,7 +51,7 @@ func (c *Client) CreateOrdererNode(req *types.FabricOrdererConfig) (*NodeRespons
return nil, fmt.Errorf("failed to create orderer node: %w", err)
}
- var node NodeResponse
+ var node httptypes.NodeResponse
if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
@@ -78,7 +60,7 @@ func (c *Client) CreateOrdererNode(req *types.FabricOrdererConfig) (*NodeRespons
}
// CreateBesuNode creates a new Besu node
-func (c *Client) CreateBesuNode(name string, req *types.BesuNodeConfig) (*NodeResponse, error) {
+func (c *Client) CreateBesuNode(name string, req *types.BesuNodeConfig) (*httptypes.NodeResponse, error) {
body := map[string]interface{}{
"name": name,
"blockchainPlatform": "BESU",
@@ -94,7 +76,7 @@ func (c *Client) CreateBesuNode(name string, req *types.BesuNodeConfig) (*NodeRe
return nil, fmt.Errorf("failed to create besu node: %w", err)
}
- var node NodeResponse
+ var node httptypes.NodeResponse
if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
@@ -103,7 +85,7 @@ func (c *Client) CreateBesuNode(name string, req *types.BesuNodeConfig) (*NodeRe
}
// ListNodes lists all nodes with optional platform filter
-func (c *Client) ListNodes(platform string, page, limit int) (*PaginatedNodesResponse, error) {
+func (c *Client) ListNodes(platform string, page, limit int) (*httptypes.ListNodesResponse, error) {
path := "/nodes"
if platform != "" {
path = fmt.Sprintf("/nodes/platform/%s", platform)
@@ -125,7 +107,7 @@ func (c *Client) ListNodes(platform string, page, limit int) (*PaginatedNodesRes
return nil, fmt.Errorf("failed to list nodes: %w", err)
}
- var nodes PaginatedNodesResponse
+ var nodes httptypes.ListNodesResponse
if err := json.NewDecoder(resp.Body).Decode(&nodes); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
@@ -134,14 +116,14 @@ func (c *Client) ListNodes(platform string, page, limit int) (*PaginatedNodesRes
}
// ListPeerNodes lists all Fabric peer nodes
-func (c *Client) ListPeerNodes(page, limit int) (*PaginatedNodesResponse, error) {
+func (c *Client) ListPeerNodes(page, limit int) (*httptypes.ListNodesResponse, error) {
nodes, err := c.ListNodes("FABRIC", page, limit)
if err != nil {
return nil, err
}
// Filter only peer nodes
- var peerNodes PaginatedNodesResponse
+ var peerNodes httptypes.ListNodesResponse
for _, node := range nodes.Items {
if node.NodeType == "FABRIC_PEER" {
peerNodes.Items = append(peerNodes.Items, node)
@@ -156,14 +138,14 @@ func (c *Client) ListPeerNodes(page, limit int) (*PaginatedNodesResponse, error)
}
// ListOrdererNodes lists all Fabric orderer nodes
-func (c *Client) ListOrdererNodes(page, limit int) (*PaginatedNodesResponse, error) {
+func (c *Client) ListOrdererNodes(page, limit int) (*httptypes.ListNodesResponse, error) {
nodes, err := c.ListNodes("FABRIC", page, limit)
if err != nil {
return nil, err
}
// Filter only orderer nodes
- var ordererNodes PaginatedNodesResponse
+ var ordererNodes httptypes.ListNodesResponse
for _, node := range nodes.Items {
if node.NodeType == "FABRIC_ORDERER" {
ordererNodes.Items = append(ordererNodes.Items, node)
@@ -178,14 +160,14 @@ func (c *Client) ListOrdererNodes(page, limit int) (*PaginatedNodesResponse, err
}
// ListBesuNodes lists all Besu nodes
-func (c *Client) ListBesuNodes(page, limit int) (*PaginatedNodesResponse, error) {
+func (c *Client) ListBesuNodes(page, limit int) (*httptypes.ListNodesResponse, error) {
nodes, err := c.ListNodes("BESU", page, limit)
if err != nil {
return nil, err
}
// Filter only Besu nodes
- var besuNodes PaginatedNodesResponse
+ var besuNodes httptypes.ListNodesResponse
for _, node := range nodes.Items {
if node.NodeType == "BESU_FULLNODE" {
besuNodes.Items = append(besuNodes.Items, node)
@@ -214,7 +196,7 @@ func (c *Client) DeleteNode(id int64) error {
}
// UpdatePeerNode updates a Fabric peer node
-func (c *Client) UpdatePeerNode(id int64, req *types.FabricPeerConfig) (*NodeResponse, error) {
+func (c *Client) UpdatePeerNode(id int64, req *types.FabricPeerConfig) (*httptypes.NodeResponse, error) {
body := map[string]interface{}{
"blockchainPlatform": "FABRIC",
"fabricPeer": req,
@@ -229,7 +211,7 @@ func (c *Client) UpdatePeerNode(id int64, req *types.FabricPeerConfig) (*NodeRes
return nil, fmt.Errorf("failed to update peer node: %w", err)
}
- var node NodeResponse
+ var node httptypes.NodeResponse
if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
@@ -238,7 +220,7 @@ func (c *Client) UpdatePeerNode(id int64, req *types.FabricPeerConfig) (*NodeRes
}
// UpdateOrdererNode updates a Fabric orderer node
-func (c *Client) UpdateOrdererNode(id int64, req *types.FabricOrdererConfig) (*NodeResponse, error) {
+func (c *Client) UpdateOrdererNode(id int64, req *types.FabricOrdererConfig) (*httptypes.NodeResponse, error) {
body := map[string]interface{}{
"blockchainPlatform": "FABRIC",
"fabricOrderer": req,
@@ -253,7 +235,7 @@ func (c *Client) UpdateOrdererNode(id int64, req *types.FabricOrdererConfig) (*N
return nil, fmt.Errorf("failed to update orderer node: %w", err)
}
- var node NodeResponse
+ var node httptypes.NodeResponse
if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
@@ -262,7 +244,7 @@ func (c *Client) UpdateOrdererNode(id int64, req *types.FabricOrdererConfig) (*N
}
// UpdateBesuNode updates a Besu node
-func (c *Client) UpdateBesuNode(id int64, req *types.BesuNodeConfig) (*NodeResponse, error) {
+func (c *Client) UpdateBesuNode(id int64, req *types.BesuNodeConfig) (*httptypes.NodeResponse, error) {
body := map[string]interface{}{
"blockchainPlatform": "BESU",
"besuNode": req,
@@ -277,7 +259,26 @@ func (c *Client) UpdateBesuNode(id int64, req *types.BesuNodeConfig) (*NodeRespo
return nil, fmt.Errorf("failed to update besu node: %w", err)
}
- var node NodeResponse
+ var node httptypes.NodeResponse
+ if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return &node, nil
+}
+
+// GetNode gets a node by ID
+func (c *Client) GetNode(id int64) (*httptypes.NodeResponse, error) {
+ resp, err := c.Get(fmt.Sprintf("/nodes/%d", id))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get node: %w", err)
+ }
+
+ if err := CheckResponse(resp, stdhttp.StatusOK); err != nil {
+ return nil, fmt.Errorf("failed to get node: %w", err)
+ }
+
+ var node httptypes.NodeResponse
if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
diff --git a/cmd/common/orgs.go b/cmd/common/orgs.go
new file mode 100644
index 0000000..334e78f
--- /dev/null
+++ b/cmd/common/orgs.go
@@ -0,0 +1,41 @@
+package common
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ types "github.com/chainlaunch/chainlaunch/pkg/fabric/handler"
+)
+
+func (c *Client) CreateOrganization(req types.CreateOrganizationRequest) (*types.OrganizationResponse, error) {
+ resp, err := c.Post("/organizations", req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create organization: %w", err)
+ }
+ defer resp.Body.Close()
+ if err := CheckResponse(resp, http.StatusCreated); err != nil {
+ return nil, err
+ }
+ var orgResp types.OrganizationResponse
+ if err := json.NewDecoder(resp.Body).Decode(&orgResp); err != nil {
+ return nil, fmt.Errorf("failed to decode organization response: %w", err)
+ }
+ return &orgResp, nil
+}
+
+func (c *Client) ListOrganizations() ([]types.OrganizationResponse, error) {
+ resp, err := c.Get("/organizations")
+ if err != nil {
+ return nil, fmt.Errorf("failed to list organizations: %w", err)
+ }
+ defer resp.Body.Close()
+ if err := CheckResponse(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+ var orgs []types.OrganizationResponse
+ if err := json.NewDecoder(resp.Body).Decode(&orgs); err != nil {
+ return nil, fmt.Errorf("failed to decode organizations list: %w", err)
+ }
+ return orgs, nil
+}
diff --git a/cmd/fabric/install/install.go b/cmd/fabric/install/install.go
index 12356cf..47e166c 100644
--- a/cmd/fabric/install/install.go
+++ b/cmd/fabric/install/install.go
@@ -12,9 +12,9 @@ import (
"path/filepath"
"github.com/chainlaunch/chainlaunch/pkg/logger"
- "google.golang.org/protobuf/proto"
"github.com/pkg/errors"
"google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
"io"
"strings"
@@ -32,7 +32,7 @@ import (
type installCmd struct {
chaincode string
channel string
- networkConfig string
+ networkConfigs []string
users []string
organizations []string
signaturePolicy string
@@ -64,10 +64,14 @@ func (c *installCmd) getPeerAndIdentityForOrg(nc *networkconfig.NetworkConfig, o
if !ok {
return nil, nil, fmt.Errorf("user %s not found in network config", userID)
}
+
+ // Get user certificate
userCert, err := gwidentity.CertificateFromPEM([]byte(user.Cert.PEM))
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to read user certificate for user %s and org %s", userID, org)
}
+
+ // Get user private key
userPrivateKey, err := gwidentity.PrivateKeyFromPEM([]byte(user.Key.PEM))
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to read user private key for user %s and org %s", userID, org)
@@ -119,28 +123,40 @@ func (c installCmd) start() error {
}
_ = pkg
packageID := chaincode.GetPackageID(label, pkg)
- c.logger.Infof("packageID: %s", packageID)
- nc, err := networkconfig.LoadFromFile(c.networkConfig)
- if err != nil {
- return err
+
+ // Load network configs
+ networkConfigs := make([]*networkconfig.NetworkConfig, len(c.organizations))
+ if len(c.networkConfigs) == 0 {
+ return fmt.Errorf("at least one network config file is required")
}
- // // install chaincode in peers
- // configBackend := config.FromFile(c.networkConfig)
-
- // clientsMap := map[string]*resmgmt.Client{}
- // sdk, err := fabsdk.New(configBackend)
- // if err != nil {
- // return err
- // }
- // for idx, mspID := range c.organizations {
- // clientContext := sdk.Context(fabsdk.WithUser(c.users[idx]), fabsdk.WithOrg(mspID))
- // clientsMap[mspID], err = resmgmt.New(clientContext)
- // if err != nil {
- // return err
- // }
- // }
+ // If only one config is provided, use it for all organizations
+ if len(c.networkConfigs) == 1 {
+ nc, err := networkconfig.LoadFromFile(c.networkConfigs[0])
+ if err != nil {
+ return err
+ }
+ for i := range c.organizations {
+ networkConfigs[i] = nc
+ }
+ } else {
+ // If multiple configs are provided, validate the count matches organizations
+ if len(c.networkConfigs) != len(c.organizations) {
+ return fmt.Errorf("number of network configs (%d) must match number of organizations (%d)", len(c.networkConfigs), len(c.organizations))
+ }
+ // Load each config
+ for i, configPath := range c.networkConfigs {
+ nc, err := networkconfig.LoadFromFile(configPath)
+ if err != nil {
+ return fmt.Errorf("failed to load network config %s: %w", configPath, err)
+ }
+ networkConfigs[i] = nc
+ }
+ }
+
+ // Install chaincode in peers
for idx, org := range c.organizations {
+ nc := networkConfigs[idx]
orgConfig, ok := nc.Organizations[org]
if !ok {
return fmt.Errorf("organization %s not found in network config", org)
@@ -168,10 +184,6 @@ func (c installCmd) start() error {
}
}
- // sp, err := policydsl.FromString(c.signaturePolicy)
- // if err != nil {
- // return err
- // }
applicationPolicy, err := chaincode.NewApplicationPolicy(c.signaturePolicy, "")
if err != nil {
return err
@@ -181,6 +193,7 @@ func (c installCmd) start() error {
sequence := 1
allOrgGateways := []*chaincode.Gateway{}
for idx, org := range c.organizations {
+ nc := networkConfigs[idx]
orgConfig, ok := nc.Organizations[org]
if !ok {
return fmt.Errorf("organization %s not found in network config", org)
@@ -208,7 +221,6 @@ func (c installCmd) start() error {
var collections []*pb.CollectionConfig
if c.pdcFile != "" {
- //
pdcBytes, err := ioutil.ReadFile(c.pdcFile)
if err != nil {
return err
@@ -277,18 +289,6 @@ func (c installCmd) start() error {
c.logger.Infof("Chaincode already committed, version=%s sequence=%d", version, sequence)
}
c.logger.Infof("Should commit=%v", shouldCommit)
- // // approve chaincode in orgs
- // approveCCRequest := resmgmt.LifecycleApproveCCRequest{
- // Name: label,
- // Version: version,
- // PackageID: packageID,
- // Sequence: int64(sequence),
- // CollectionConfig: collections,
- // EndorsementPlugin: "escc",
- // ValidationPlugin: "vscc",
- // SignaturePolicy: sp,
- // InitRequired: false,
- // }
chaincodeDef := &chaincode.Definition{
ChannelName: c.channel,
@@ -305,17 +305,17 @@ func (c installCmd) start() error {
for idx, gateway := range allOrgGateways {
err := gateway.Approve(ctx, chaincodeDef)
if err != nil {
- c.logger.Errorf("Error when approving chaincode: %v", err)
- return err
- }
- if err != nil && !strings.Contains(err.Error(), "redefine uncommitted") {
- c.logger.Errorf("Error when approving chaincode: %v", err)
- return err
+ if strings.Contains(err.Error(), "redefine uncommitted") {
+ c.logger.Infof("Chaincode already committed, org=%s", c.organizations[idx])
+ } else {
+ c.logger.Errorf("Error when approving chaincode: %v", err)
+ return err
+ }
}
+
c.logger.Infof("Chaincode approved, org=%s", c.organizations[idx])
}
if shouldCommit {
-
// commit chaincode in orgs
err := firstGateway.Commit(
ctx,
@@ -326,7 +326,6 @@ func (c installCmd) start() error {
return err
}
c.logger.Infof("Chaincode committed")
-
}
if c.envFile != "" {
@@ -546,7 +545,7 @@ func NewInstallCmd(logger *logger.Logger) *cobra.Command {
f := cmd.Flags()
f.StringVar(&c.chaincode, "chaincode", "", "chaincode name within the channel")
f.StringVar(&c.channel, "channel", "", "Channel name")
- f.StringVar(&c.networkConfig, "config", "", "Network config file")
+ f.StringArrayVar(&c.networkConfigs, "config", []string{}, "Network config files (one per organization, if only one is provided it will be used for all organizations)")
f.StringVar(&c.signaturePolicy, "policy", "", "Signature policy for the chaincode")
f.StringArrayVarP(&c.organizations, "organizations", "o", []string{}, "Organizations to connect to ")
f.StringArrayVarP(&c.users, "users", "u", []string{}, "Users to use")
diff --git a/cmd/fabric/orderer/list.go b/cmd/fabric/orderer/list.go
index ef27cfd..ea7ca51 100644
--- a/cmd/fabric/orderer/list.go
+++ b/cmd/fabric/orderer/list.go
@@ -1,6 +1,7 @@
package orderer
import (
+ "encoding/json"
"fmt"
"io"
"os"
@@ -14,6 +15,7 @@ import (
type listCmd struct {
page int
limit int
+ output string // "tsv" or "json"
logger *logger.Logger
}
@@ -29,20 +31,31 @@ func (c *listCmd) run(out io.Writer) error {
return fmt.Errorf("failed to list orderer nodes: %w", err)
}
- // Print nodes in table format
- w := tabwriter.NewWriter(out, 0, 0, 3, ' ', tabwriter.TabIndent)
- fmt.Fprintln(w, "ID\tNAME\tSTATUS\tENDPOINT")
- for _, node := range nodes.Items {
- fmt.Fprintf(w, "%d\t%s\t%s\t%s\n", node.ID, node.Name, node.Status, node.Endpoint)
+ switch c.output {
+ case "json":
+ enc := json.NewEncoder(out)
+ enc.SetIndent("", " ")
+ if err := enc.Encode(nodes.Items); err != nil {
+ return fmt.Errorf("failed to encode orderer nodes as JSON: %w", err)
+ }
+ return nil
+ case "tsv":
+ w := tabwriter.NewWriter(out, 0, 0, 3, ' ', tabwriter.TabIndent)
+ fmt.Fprintln(w, "ID\tNAME\tSTATUS\tENDPOINT")
+ for _, node := range nodes.Items {
+ fmt.Fprintf(w, "%d\t%s\t%s\t%s\n", node.ID, node.Name, node.Status, node.Endpoint)
+ }
+ w.Flush()
+ return nil
+ default:
+ return fmt.Errorf("unsupported output type: %s (must be 'tsv' or 'json')", c.output)
}
- w.Flush()
-
- return nil
}
// NewListCmd returns the list orderers command
func NewListCmd(logger *logger.Logger) *cobra.Command {
c := &listCmd{
+ output: "tsv",
logger: logger,
}
@@ -58,6 +71,7 @@ func NewListCmd(logger *logger.Logger) *cobra.Command {
flags := cmd.Flags()
flags.IntVar(&c.page, "page", 1, "Page number")
flags.IntVar(&c.limit, "limit", 10, "Number of items per page")
+ flags.StringVar(&c.output, "output", "tsv", "Output type: tsv or json")
return cmd
}
diff --git a/cmd/fabric/org/client.go b/cmd/fabric/org/client.go
index 2d872bd..3a90e19 100644
--- a/cmd/fabric/org/client.go
+++ b/cmd/fabric/org/client.go
@@ -56,16 +56,12 @@ func (cw *ClientWrapper) CreateOrganization(name, mspID string, providerID int64
}
// ListOrganizations lists all organizations
-func (cw *ClientWrapper) ListOrganizations() ([]client.Organization, error) {
+func (cw *ClientWrapper) ListOrganizations() (*client.PaginatedOrganizationsResponse, error) {
orgs, err := cw.client.ListOrganizations()
if err != nil {
return nil, fmt.Errorf("failed to list organizations: %w", err)
}
- if len(orgs) == 0 {
- return nil, fmt.Errorf("no organizations found")
- }
-
return orgs, nil
}
diff --git a/cmd/fabric/org/list.go b/cmd/fabric/org/list.go
index 96e8961..91b4454 100644
--- a/cmd/fabric/org/list.go
+++ b/cmd/fabric/org/list.go
@@ -1,16 +1,19 @@
package org
import (
+ "encoding/json"
+ "fmt"
"io"
"os"
+ "text/tabwriter"
"time"
"github.com/chainlaunch/chainlaunch/pkg/logger"
- "github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
)
type listCmd struct {
+ output string // "tsv" or "json"
logger *logger.Logger
}
@@ -23,39 +26,36 @@ func (c *listCmd) run(out io.Writer) error {
return err
}
- // Create table writer
- table := tablewriter.NewWriter(out)
- table.SetHeader([]string{"MSP ID", "Created At", "Description"})
-
- // Configure table style
- table.SetAutoWrapText(false)
- table.SetAutoFormatHeaders(true)
- table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
- table.SetAlignment(tablewriter.ALIGN_LEFT)
- table.SetCenterSeparator("")
- table.SetColumnSeparator("")
- table.SetRowSeparator("-")
- table.SetHeaderLine(true)
- table.SetBorder(false)
- table.SetTablePadding("\t")
- table.SetNoWhiteSpace(true)
-
- // Add data to table
- for _, org := range orgs {
- table.Append([]string{
- org.MspID,
- org.CreatedAt.Format(time.RFC3339),
- org.Description,
- })
+ switch c.output {
+ case "json":
+ enc := json.NewEncoder(out)
+ enc.SetIndent("", " ")
+ if err := enc.Encode(orgs.Items); err != nil {
+ return fmt.Errorf("failed to encode organizations as JSON: %w", err)
+ }
+ return nil
+ case "tsv":
+ w := tabwriter.NewWriter(out, 0, 0, 3, ' ', 0)
+ fmt.Fprintln(w, "MSP ID\tCreated At\tDescription")
+ fmt.Fprintln(w, "------\t----------\t-----------")
+ for _, org := range orgs.Items {
+ fmt.Fprintf(w, "%s\t%s\t%s\n",
+ org.MspID,
+ org.CreatedAt.Format(time.RFC3339),
+ org.Description,
+ )
+ }
+ w.Flush()
+ return nil
+ default:
+ return fmt.Errorf("unsupported output type: %s (must be 'tsv' or 'json')", c.output)
}
-
- table.Render()
- return nil
}
// NewListCmd returns the list organizations command
func NewListCmd(logger *logger.Logger) *cobra.Command {
c := &listCmd{
+ output: "tsv",
logger: logger,
}
@@ -68,5 +68,8 @@ func NewListCmd(logger *logger.Logger) *cobra.Command {
},
}
+ flags := cmd.Flags()
+ flags.StringVar(&c.output, "output", "tsv", "Output type: tsv or json")
+
return cmd
}
diff --git a/cmd/fabric/peer/list.go b/cmd/fabric/peer/list.go
index 5069e7c..2d61881 100644
--- a/cmd/fabric/peer/list.go
+++ b/cmd/fabric/peer/list.go
@@ -1,6 +1,7 @@
package peer
import (
+ "encoding/json"
"fmt"
"io"
"os"
@@ -14,6 +15,7 @@ import (
type listCmd struct {
page int
limit int
+ output string // "tsv" or "json"
logger *logger.Logger
}
@@ -29,20 +31,31 @@ func (c *listCmd) run(out io.Writer) error {
return fmt.Errorf("failed to list peer nodes: %w", err)
}
- // Print nodes in table format
- w := tabwriter.NewWriter(out, 0, 0, 3, ' ', tabwriter.TabIndent)
- fmt.Fprintln(w, "ID\tNAME\tSTATUS\tENDPOINT")
- for _, node := range nodes.Items {
- fmt.Fprintf(w, "%d\t%s\t%s\t%s\n", node.ID, node.Name, node.Status, node.Endpoint)
+ switch c.output {
+ case "json":
+ enc := json.NewEncoder(out)
+ enc.SetIndent("", " ")
+ if err := enc.Encode(nodes.Items); err != nil {
+ return fmt.Errorf("failed to encode peer nodes as JSON: %w", err)
+ }
+ return nil
+ case "tsv":
+ w := tabwriter.NewWriter(out, 0, 0, 3, ' ', tabwriter.TabIndent)
+ fmt.Fprintln(w, "ID\tNAME\tSTATUS\tENDPOINT")
+ for _, node := range nodes.Items {
+ fmt.Fprintf(w, "%d\t%s\t%s\t%s\n", node.ID, node.Name, node.Status, node.Endpoint)
+ }
+ w.Flush()
+ return nil
+ default:
+ return fmt.Errorf("unsupported output type: %s (must be 'tsv' or 'json')", c.output)
}
- w.Flush()
-
- return nil
}
// NewListCmd returns the list peers command
func NewListCmd(logger *logger.Logger) *cobra.Command {
c := &listCmd{
+ output: "tsv",
logger: logger,
}
@@ -58,6 +71,7 @@ func NewListCmd(logger *logger.Logger) *cobra.Command {
flags := cmd.Flags()
flags.IntVar(&c.page, "page", 1, "Page number")
flags.IntVar(&c.limit, "limit", 10, "Number of items per page")
+ flags.StringVar(&c.output, "output", "tsv", "Output type: tsv or json")
return cmd
}
diff --git a/cmd/networks/besu/besu.go b/cmd/networks/besu/besu.go
index 66adb26..d3e2d7d 100644
--- a/cmd/networks/besu/besu.go
+++ b/cmd/networks/besu/besu.go
@@ -16,6 +16,7 @@ func NewBesuCmd(logger *logger.Logger) *cobra.Command {
rootCmd.AddCommand(
newCreateCmd(logger),
newUpdateCmd(logger),
+ NewListCmd(logger),
)
return rootCmd
diff --git a/cmd/networks/besu/list.go b/cmd/networks/besu/list.go
new file mode 100644
index 0000000..be880fb
--- /dev/null
+++ b/cmd/networks/besu/list.go
@@ -0,0 +1,51 @@
+package besu
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/chainlaunch/chainlaunch/cmd/common"
+ "github.com/chainlaunch/chainlaunch/pkg/logger"
+ "github.com/spf13/cobra"
+)
+
+// NewListCmd returns a command that lists all Besu networks
+func NewListCmd(logger *logger.Logger) *cobra.Command {
+ var output string
+ cmd := &cobra.Command{
+ Use: "list",
+ Short: "List all Besu networks",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ client, err := common.NewClientFromEnv()
+ if err != nil {
+ return err
+ }
+ result, err := client.ListBesuNetworks()
+ if err != nil {
+ return err
+ }
+
+ switch strings.ToLower(output) {
+ case "json":
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ return enc.Encode(result)
+ case "tsv":
+ fmt.Println("ID\tName\tStatus\tCreatedAt")
+ for _, n := range result.Networks {
+ fmt.Printf("%d\t%s\t%s\t%s\n", n.ID, n.Name, n.Status, n.CreatedAt)
+ }
+ return nil
+ default: // table
+ for _, n := range result.Networks {
+ fmt.Printf("ID: %d | Name: %s | Status: %s | CreatedAt: %s\n", n.ID, n.Name, n.Status, n.CreatedAt)
+ }
+ return nil
+ }
+ },
+ }
+ cmd.Flags().StringVarP(&output, "output", "o", "table", "Output format: table, json, or tsv")
+ return cmd
+}
diff --git a/cmd/networks/fabric/fabric.go b/cmd/networks/fabric/fabric.go
index 94fec16..1da1fc6 100644
--- a/cmd/networks/fabric/fabric.go
+++ b/cmd/networks/fabric/fabric.go
@@ -1,8 +1,11 @@
package fabric
import (
+ "fmt"
+
+ "github.com/chainlaunch/chainlaunch/cmd/common"
"github.com/chainlaunch/chainlaunch/pkg/logger"
"github.com/spf13/cobra"
)
// NewFabricCmd returns the fabric command
@@ -16,7 +20,130 @@ func NewFabricCmd(logger *logger.Logger) *cobra.Command {
rootCmd.AddCommand(
newCreateCmd(logger),
newUpdateCmd(logger),
+ newJoinCmd(logger),
+ newJoinAllCmd(logger),
+ newJoinOrdererCmd(logger),
+ newJoinAllOrderersCmd(logger),
+ NewListCmd(logger),
)
return rootCmd
}
+
+func newJoinCmd(logger *logger.Logger) *cobra.Command {
+ var networkID int64
+ var peerID int64
+
+ cmd := &cobra.Command{
+ Use: "join",
+ Short: "Join a peer to a Fabric network",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ client, err := common.NewClientFromEnv()
+ if err != nil {
+ return fmt.Errorf("failed to create client: %w", err)
+ }
+ resp, err := client.JoinPeerToFabricNetwork(networkID, peerID)
+ if err != nil {
+ return fmt.Errorf("failed to join peer %d to network %d: %w", peerID, networkID, err)
+ }
+ fmt.Printf("Peer %d joined network %d successfully.\n", peerID, networkID)
+ fmt.Printf("Network ID: %d\n", resp.ID)
+ fmt.Printf("Status: %s\n", resp.Status)
+ return nil
+ },
+ }
+ cmd.Flags().Int64Var(&networkID, "network-id", 0, "Fabric network ID")
+ cmd.Flags().Int64Var(&peerID, "peer-id", 0, "Peer node ID")
+ cmd.MarkFlagRequired("network-id")
+ cmd.MarkFlagRequired("peer-id")
+ return cmd
+}
+
+func newJoinAllCmd(logger *logger.Logger) *cobra.Command {
+ var networkID int64
+
+ cmd := &cobra.Command{
+ Use: "join-all",
+ Short: "Join all peers to a Fabric network",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ client, err := common.NewClientFromEnv()
+ if err != nil {
+ return fmt.Errorf("failed to create client: %w", err)
+ }
+ results, errs := client.JoinAllPeersToFabricNetwork(networkID)
+ for _, resp := range results {
+ fmt.Printf("Peer joined network %d successfully. Network ID: %d, Status: %s\n", networkID, resp.ID, resp.Status)
+ }
+ if len(errs) > 0 {
+ fmt.Println("Errors occurred while joining some peers:")
+ for _, err := range errs {
+ fmt.Printf(" %v\n", err)
+ }
+ return fmt.Errorf("some peers failed to join the network")
+ }
+ return nil
+ },
+ }
+ cmd.Flags().Int64Var(&networkID, "network-id", 0, "Fabric network ID")
+ cmd.MarkFlagRequired("network-id")
+ return cmd
+}
+
+func newJoinOrdererCmd(logger *logger.Logger) *cobra.Command {
+ var networkID int64
+ var ordererID int64
+
+ cmd := &cobra.Command{
+ Use: "join-orderer",
+ Short: "Join an orderer to a Fabric network",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ client, err := common.NewClientFromEnv()
+ if err != nil {
+ return fmt.Errorf("failed to create client: %w", err)
+ }
+ resp, err := client.JoinOrdererToFabricNetwork(networkID, ordererID)
+ if err != nil {
+ return fmt.Errorf("failed to join orderer %d to network %d: %w", ordererID, networkID, err)
+ }
+ fmt.Printf("Orderer %d joined network %d successfully.\n", ordererID, networkID)
+ fmt.Printf("Network ID: %d\n", resp.ID)
+ fmt.Printf("Status: %s\n", resp.Status)
+ return nil
+ },
+ }
+ cmd.Flags().Int64Var(&networkID, "network-id", 0, "Fabric network ID")
+ cmd.Flags().Int64Var(&ordererID, "orderer-id", 0, "Orderer node ID")
+ cmd.MarkFlagRequired("network-id")
+ cmd.MarkFlagRequired("orderer-id")
+ return cmd
+}
+
+func newJoinAllOrderersCmd(logger *logger.Logger) *cobra.Command {
+ var networkID int64
+
+ cmd := &cobra.Command{
+ Use: "join-all-orderers",
+ Short: "Join all orderers to a Fabric network",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ client, err := common.NewClientFromEnv()
+ if err != nil {
+ return fmt.Errorf("failed to create client: %w", err)
+ }
+ results, errs := client.JoinAllOrderersToFabricNetwork(networkID)
+ for _, resp := range results {
+ fmt.Printf("Orderer joined network %d successfully. Network ID: %d, Status: %s\n", networkID, resp.ID, resp.Status)
+ }
+ if len(errs) > 0 {
+ fmt.Println("Errors occurred while joining some orderers:")
+ for _, err := range errs {
+ fmt.Printf(" %v\n", err)
+ }
+ return fmt.Errorf("some orderers failed to join the network")
+ }
+ return nil
+ },
+ }
+ cmd.Flags().Int64Var(&networkID, "network-id", 0, "Fabric network ID")
+ cmd.MarkFlagRequired("network-id")
+ return cmd
+}
diff --git a/cmd/networks/fabric/list.go b/cmd/networks/fabric/list.go
new file mode 100644
index 0000000..1cc46ae
--- /dev/null
+++ b/cmd/networks/fabric/list.go
@@ -0,0 +1,51 @@
+package fabric
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/chainlaunch/chainlaunch/cmd/common"
+ "github.com/chainlaunch/chainlaunch/pkg/logger"
+ "github.com/spf13/cobra"
+)
+
+// NewListCmd returns a command that lists all Fabric networks
+func NewListCmd(logger *logger.Logger) *cobra.Command {
+ var output string
+ cmd := &cobra.Command{
+ Use: "list",
+ Short: "List all Fabric networks",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ client, err := common.NewClientFromEnv()
+ if err != nil {
+ return err
+ }
+ result, err := client.ListFabricNetworks()
+ if err != nil {
+ return err
+ }
+
+ switch strings.ToLower(output) {
+ case "json":
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ return enc.Encode(result)
+ case "tsv":
+ fmt.Println("ID\tName\tStatus\tCreatedAt")
+ for _, n := range result.Networks {
+ fmt.Printf("%d\t%s\t%s\t%s\n", n.ID, n.Name, n.Status, n.CreatedAt)
+ }
+ return nil
+ default: // table
+ for _, n := range result.Networks {
+ fmt.Printf("ID: %d | Name: %s | Status: %s | CreatedAt: %s\n", n.ID, n.Name, n.Status, n.CreatedAt)
+ }
+ return nil
+ }
+ },
+ }
+ cmd.Flags().StringVarP(&output, "output", "o", "table", "Output format: table, json, or tsv")
+ return cmd
+}
diff --git a/cmd/root.go b/cmd/root.go
index 17a658b..a455007 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -10,10 +10,11 @@ import (
"github.com/chainlaunch/chainlaunch/cmd/keymanagement"
"github.com/chainlaunch/chainlaunch/cmd/networks"
"github.com/chainlaunch/chainlaunch/cmd/serve"
+ "github.com/chainlaunch/chainlaunch/cmd/testnet"
"github.com/chainlaunch/chainlaunch/cmd/version"
"github.com/chainlaunch/chainlaunch/config"
"github.com/chainlaunch/chainlaunch/pkg/logger"
"github.com/spf13/cobra"
)
// rootCmd represents the base command when called without any subcommands
@@ -33,5 +36,6 @@ func NewRootCmd(configCMD config.ConfigCMD) *cobra.Command {
rootCmd.AddCommand(besu.NewBesuCmd(logger))
rootCmd.AddCommand(networks.NewNetworksCmd(logger))
rootCmd.AddCommand(keymanagement.NewKeyManagementCmd())
+ rootCmd.AddCommand(testnet.NewTestnetCmd())
return rootCmd
}
diff --git a/cmd/serve/serve.go b/cmd/serve/serve.go
index 2e66793..34c73d3 100644
--- a/cmd/serve/serve.go
+++ b/cmd/serve/serve.go
@@ -28,9 +28,12 @@ import (
"github.com/chainlaunch/chainlaunch/pkg/keymanagement/handler"
"github.com/chainlaunch/chainlaunch/pkg/keymanagement/service"
"github.com/chainlaunch/chainlaunch/pkg/logger"
+ metricscommon "github.com/chainlaunch/chainlaunch/pkg/metrics/common"
"github.com/chainlaunch/chainlaunch/pkg/monitoring"
nodeTypes "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+ "github.com/chainlaunch/chainlaunch/pkg/audit"
+ "github.com/chainlaunch/chainlaunch/pkg/metrics"
networkshttp "github.com/chainlaunch/chainlaunch/pkg/networks/http"
networksservice "github.com/chainlaunch/chainlaunch/pkg/networks/service"
nodeshttp "github.com/chainlaunch/chainlaunch/pkg/nodes/http"
@@ -282,6 +285,8 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed
organizationService := fabricservice.NewOrganizationService(queries, keyManagementService, configService)
logger := logger.NewDefault()
+ auditService := audit.NewService(queries, 10)
+
nodeEventService := nodesservice.NewNodeEventService(queries, logger)
settingsService := settingsservice.NewSettingsService(queries, logger)
_, err = settingsService.InitializeDefaultSettings(context.Background())
@@ -290,7 +295,16 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed
}
settingsHandler := settingshttp.NewHandler(settingsService, logger)
+ // Initialize metrics service
+ metricsConfig := metricscommon.DefaultConfig()
nodesService := nodesservice.NewNodeService(queries, logger, keyManagementService, organizationService, nodeEventService, configService, settingsService)
+ metricsService, err := metrics.NewService(metricsConfig, queries, nodesService)
+ if err != nil {
+ log.Fatal("Failed to initialize metrics service:", err)
+ }
+ nodesService.SetMetricsService(metricsService)
+ metricsHandler := metrics.NewHandler(metricsService, logger)
+
networksService := networksservice.NewNetworkService(queries, nodesService, keyManagementService, logger, organizationService)
notificationService := notificationservice.NewNotificationService(queries, logger)
backupService := backupservice.NewBackupService(queries, logger, notificationService, dbPath, configService)
@@ -401,13 +415,13 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed
continue
}
- logger.Infof("Added node %s (%s) to monitoring", node.Name, node.ID)
+ logger.Infof("Added node %s (%d) to monitoring", node.Name, node.ID)
}
}()
// Initialize plugin store and manager
- pluginStore := plugin.NewSQLStore(queries)
- pluginManager, err := plugin.NewPluginManager(filepath.Join(dataPath, "plugins"))
+ pluginStore := plugin.NewSQLStore(queries, nodesService)
+ pluginManager, err := plugin.NewPluginManager(filepath.Join(dataPath, "plugins"), queries, nodesService, keyManagementService, logger)
if err != nil {
log.Fatal("Failed to initialize plugin manager:", err)
}
@@ -424,6 +438,7 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed
backupHandler := backuphttp.NewHandler(backupService)
notificationHandler := notificationhttp.NewNotificationHandler(notificationService)
authHandler := auth.NewHandler(authService)
+ auditHandler := audit.NewHandler(auditService, logger)
// Setup router
r := chi.NewRouter()
@@ -451,6 +466,7 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed
// Protected routes
r.Group(func(r chi.Router) {
r.Use(auth.AuthMiddleware(authService))
+ r.Use(audit.HTTPMiddleware(auditService)) // Add audit middleware
// Mount auth routes
authHandler.RegisterRoutes(r)
@@ -471,6 +487,11 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed
settingsHandler.RegisterRoutes(r)
// Mount plugin routes
pluginHandler.RegisterRoutes(r)
+ // Mount metrics routes
+ metricsHandler.RegisterRoutes(r)
+
+ // Mount audit routes
+ auditHandler.RegisterRoutes(r)
})
})
r.Get("/api/swagger/*", httpSwagger.Handler(
diff --git a/cmd/testnet/besu/besu.go b/cmd/testnet/besu/besu.go
new file mode 100644
index 0000000..8de711a
--- /dev/null
+++ b/cmd/testnet/besu/besu.go
@@ -0,0 +1,226 @@
+package besu
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/cmd/common"
+ "github.com/chainlaunch/chainlaunch/pkg/common/addresses"
+ "github.com/chainlaunch/chainlaunch/pkg/common/ports"
+ "github.com/chainlaunch/chainlaunch/pkg/keymanagement/models"
+ "github.com/chainlaunch/chainlaunch/pkg/networks/http"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+ "github.com/lithammer/shortuuid/v4"
+ "github.com/spf13/cobra"
+)
+
+func generateShortUUID() string {
+ return shortuuid.New()[0:5]
+}
+
+// BesuTestnetConfig holds the parameters for creating a Besu testnet
+type BesuTestnetConfig struct {
+ Name string
+ Nodes int
+ Prefix string
+ Mode string
+ Version string
+ // Initial account balances in wei (hex format)
+ InitialBalances map[string]string
+}
+
+// BesuTestnetRunner encapsulates the config and logic for running and validating the Besu testnet command
+type BesuTestnetRunner struct {
+ Config BesuTestnetConfig
+}
+
+// Validate checks the configuration for required fields
+func (r *BesuTestnetRunner) Validate() error {
+ if r.Config.Name == "" {
+ return fmt.Errorf("--name is required")
+ }
+ if r.Config.Nodes < 1 {
+ return fmt.Errorf("--nodes must be at least 1")
+ }
+ // For QBFT consensus, require at least 4 nodes
+ if r.Config.Nodes < 4 {
+ return fmt.Errorf("--nodes must be at least 4 for QBFT consensus")
+ }
+ if r.Config.Mode != "docker" && r.Config.Mode != "service" {
+ return fmt.Errorf("--mode must be either 'docker' or 'service'")
+ }
+ return nil
+}
+
+// Run executes the Besu testnet creation logic
+func (r *BesuTestnetRunner) Run() error {
+ if err := r.Validate(); err != nil {
+ return err
+ }
+
+ client, err := common.NewClientFromEnv()
+ if err != nil {
+ return fmt.Errorf("failed to create API client: %w", err)
+ }
+
+ externalIP, err := addresses.GetExternalIP()
+ if err != nil {
+ return fmt.Errorf("failed to get external IP: %w", err)
+ }
+
+ // 1. Create all keys and collect their IDs
+ fmt.Printf("Creating %d validator keys...\n", r.Config.Nodes)
+ keyIDs := make([]int64, 0, r.Config.Nodes)
+ nodeNames := make([]string, 0, r.Config.Nodes)
+ for i := 0; i < r.Config.Nodes; i++ {
+ nodeName := fmt.Sprintf("%s-%s-%d", r.Config.Prefix, r.Config.Name, i+1)
+ nodeNames = append(nodeNames, nodeName)
+ fmt.Printf(" Creating key for node %s...\n", nodeName)
+ providerID := 1
+ isCA := 0
+ keyReq := &models.CreateKeyRequest{
+ Name: nodeName + "-key",
+ Algorithm: models.KeyAlgorithmEC,
+ ProviderID: &providerID,
+ Curve: func() *models.ECCurve { c := models.ECCurveSECP256K1; return &c }(),
+ IsCA: &isCA,
+ }
+ keyResp, err := client.CreateKey(keyReq)
+ if err != nil {
+ return fmt.Errorf("failed to create key for node %s: %w", nodeName, err)
+ }
+ fmt.Printf(" Key created: ID %d\n", keyResp.ID)
+ keyIDs = append(keyIDs, int64(keyResp.ID))
+ }
+
+ // 2. Create the Besu network with all key IDs as validators
+ fmt.Printf("Creating Besu network '%s' with %d validators...\n", r.Config.Name, len(keyIDs))
+ netReq := &http.CreateBesuNetworkRequest{
+ Name: r.Config.Name,
+ Description: "",
+ }
+ netReq.Config.Consensus = "qbft"
+ netReq.Config.ChainID = 1337
+ netReq.Config.BlockPeriod = 5
+ netReq.Config.EpochLength = 30000
+ netReq.Config.RequestTimeout = 10
+ netReq.Config.InitialValidatorKeyIds = keyIDs
+ netReq.Config.GasLimit = "0x29b92700" // 700000000 in hex
+ netReq.Config.Difficulty = "0x1" // numberToHex(1)
+ netReq.Config.MixHash = "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365"
+ netReq.Config.Coinbase = "0x0000000000000000000000000000000000000000"
+ netReq.Config.Timestamp = fmt.Sprintf("0x%x", time.Now().Unix()) // Current Unix timestamp in hex (seconds)
+ netReq.Config.Nonce = "0x0" // numberToHex(0)
+
+ // Set initial account balances if provided
+ if r.Config.InitialBalances != nil {
+ netReq.Config.Alloc = make(map[string]struct {
+ Balance string `json:"balance" validate:"required,hexadecimal"`
+ })
+ for address, balance := range r.Config.InitialBalances {
+ netReq.Config.Alloc[address] = struct {
+ Balance string `json:"balance" validate:"required,hexadecimal"`
+ }{
+ Balance: balance,
+ }
+ }
+ } else {
+ netReq.Config.Alloc = map[string]struct {
+ Balance string `json:"balance" validate:"required,hexadecimal"`
+ }{}
+ }
+
+ netResp, err := client.CreateBesuNetwork(netReq)
+ if err != nil {
+ return fmt.Errorf("failed to create besu network: %w", err)
+ }
+ fmt.Printf(" Besu network created: ID %d\n", netResp.ID)
+
+ // 3. Create each Besu node, using the corresponding key
+ fmt.Printf("Creating %d Besu nodes...\n", r.Config.Nodes)
+ nodeIDs := []int64{}
+ var firstNodeEnode string
+ for i := 0; i < r.Config.Nodes; i++ {
+ nodeName := nodeNames[i]
+ keyID := keyIDs[i]
+ fmt.Printf(" Creating Besu node %s with key ID %d...\n", nodeName, keyID)
+ // Allocate ports for Besu node
+ rpcPort, err := ports.GetFreePort("besu")
+ if err != nil {
+ return fmt.Errorf("failed to allocate RPC port for node %s: %w", nodeName, err)
+ }
+ p2pPort, err := ports.GetFreePort("besu-p2p")
+ if err != nil {
+ return fmt.Errorf("failed to allocate P2P port for node %s: %w", nodeName, err)
+ }
+ metricsPort, err := ports.GetFreePort("besu-metrics")
+ if err != nil {
+ return fmt.Errorf("failed to allocate metrics port for node %s: %w", nodeName, err)
+ }
+
+ bootNodes := []string{}
+ if i > 0 {
+ bootNodes = []string{firstNodeEnode}
+ }
+
+ // Prepare Besu node config (service layer struct)
+ besuNodeConfig := &types.BesuNodeConfig{
+ BaseNodeConfig: types.BaseNodeConfig{Mode: r.Config.Mode, Type: "besu"},
+ NetworkID: int64(netResp.ID),
+ KeyID: keyID,
+ P2PPort: uint(p2pPort.Port),
+ RPCPort: uint(rpcPort.Port),
+ P2PHost: "0.0.0.0",
+ RPCHost: "0.0.0.0",
+ ExternalIP: externalIP,
+ InternalIP: externalIP,
+ Env: map[string]string{},
+ BootNodes: bootNodes,
+ MetricsEnabled: true,
+ MetricsPort: int64(metricsPort.Port),
+ MetricsProtocol: "PROMETHEUS",
+ Version: r.Config.Version,
+ }
+ nodeResp, err := client.CreateBesuNode(nodeName, besuNodeConfig)
+ if err != nil {
+ return fmt.Errorf("failed to create besu node %s: %w", nodeName, err)
+ }
+ if i == 0 {
+ firstNodeEnode = nodeResp.BesuNode.EnodeURL
+ }
+ fmt.Printf(" Node created ID: %d\n", nodeResp.ID)
+ nodeIDs = append(nodeIDs, nodeResp.ID)
+ }
+
+ fmt.Printf("Besu testnet created successfully! Network ID: %d\n", netResp.ID)
+ return nil
+}
+
+func NewBesuTestnetCmd() *cobra.Command {
+ runner := &BesuTestnetRunner{
+ Config: BesuTestnetConfig{
+ InitialBalances: make(map[string]string),
+ },
+ }
+
+ cmd := &cobra.Command{
+ Use: "besu",
+ Short: "Create a Besu testnet",
+ Run: func(cmd *cobra.Command, args []string) {
+ if err := runner.Run(); err != nil {
+ fmt.Println("Error:", err)
+ os.Exit(1)
+ }
+ },
+ }
+
+ cmd.Flags().StringVar(&runner.Config.Name, "name", "", "Name of the testnet (required)")
+ cmd.Flags().IntVar(&runner.Config.Nodes, "nodes", 4, "Number of validator nodes (minimum 4 required for QBFT consensus)")
+ cmd.Flags().StringVar(&runner.Config.Prefix, "prefix", "besu", "Prefix for node names")
+ cmd.Flags().StringVar(&runner.Config.Mode, "mode", "service", "Node mode (service or docker)")
+ cmd.Flags().StringVar(&runner.Config.Version, "version", "25.5.0", "Besu version (default 25.5.0)")
+ cmd.Flags().StringToStringVar(&runner.Config.InitialBalances, "initial-balance", map[string]string{}, "Initial account balances in wei (hex format), e.g. '0x1234...=0x1000000000000000000'")
+
+ return cmd
+}
diff --git a/cmd/testnet/fabric/fabric.go b/cmd/testnet/fabric/fabric.go
new file mode 100644
index 0000000..9532a49
--- /dev/null
+++ b/cmd/testnet/fabric/fabric.go
@@ -0,0 +1,344 @@
+package fabric
+
+import (
+ "fmt"
+ "net"
+ "os"
+
+ "github.com/chainlaunch/chainlaunch/cmd/common"
+ "github.com/chainlaunch/chainlaunch/pkg/common/ports"
+ fabrictypes "github.com/chainlaunch/chainlaunch/pkg/fabric/handler"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+
+ networkshttp "github.com/chainlaunch/chainlaunch/pkg/networks/http"
+ shortuuid "github.com/lithammer/shortuuid/v4"
+ "github.com/spf13/cobra"
+)
+
+// generateShortUUID returns the first 5 characters of a freshly generated
+// short UUID; used to give organizations and nodes unique name suffixes.
+func generateShortUUID() string {
+ return shortuuid.New()[0:5]
+}
+
+// getExternalIP returns the first non-loopback IPv4 address found on the host.
+// Interfaces that are down or loopback are skipped, as are IPv6-only
+// addresses; the first usable address wins, so the result depends on the
+// interface enumeration order reported by the OS. An error is returned only
+// when interface enumeration fails or no candidate address exists.
+func getExternalIP() (string, error) {
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return "", err
+ }
+ for _, iface := range interfaces {
+ // Skip interfaces that are administratively down or loopback devices.
+ if iface.Flags&net.FlagUp == 0 || iface.Flags&net.FlagLoopback != 0 {
+ continue
+ }
+ addrs, err := iface.Addrs()
+ if err != nil {
+ // Best-effort: an unreadable interface is skipped, not fatal.
+ continue
+ }
+ for _, addr := range addrs {
+ var ip net.IP
+ switch v := addr.(type) {
+ case *net.IPNet:
+ ip = v.IP
+ case *net.IPAddr:
+ ip = v.IP
+ }
+ if ip == nil || ip.IsLoopback() {
+ continue
+ }
+ // To4 returns nil for addresses that cannot be represented as IPv4.
+ ip = ip.To4()
+ if ip == nil {
+ continue // not an ipv4 address
+ }
+ return ip.String(), nil
+ }
+ }
+ return "", fmt.Errorf("no external IP found")
+}
+
+// FabricTestnetConfig holds the parameters for creating a Fabric testnet
+type FabricTestnetConfig struct {
+ Name string // testnet name; used as the prefix of generated node names (required)
+ Nodes int // NOTE(review): not referenced by Run; node counts come from PeerCounts/OrdererCounts — confirm intent
+ Org string // organization MSP ID; required by Validate but otherwise unused in Run — confirm intent
+ PeerOrgs []string // MSP IDs of peer organizations to create
+ OrdererOrgs []string // MSP IDs of orderer organizations to create
+ Channels []string // NOTE(review): accepted as a flag but not consumed by Run yet
+ PeerCounts map[string]int // number of peers per org, keyed by peer-org MSP ID
+ OrdererCounts map[string]int // number of orderers per org, keyed by orderer-org MSP ID
+ Mode string // node deployment mode: "service" or "docker"
+}
+
+// FabricTestnetRunner encapsulates the config and logic for running and validating the Fabric testnet command
+type FabricTestnetRunner struct {
+ Config FabricTestnetConfig // parameters collected from the CLI flags
+}
+
+// Validate checks the configuration for required fields.
+// It requires a testnet name and an organization MSP ID, and enforces that
+// the orderer counts sum to at least 3 (the minimum required for consenters,
+// per the error message below).
+func (r *FabricTestnetRunner) Validate() error {
+ if r.Config.Name == "" {
+ return fmt.Errorf("--name is required")
+ }
+ if r.Config.Org == "" {
+ return fmt.Errorf("--org is required for fabric")
+ }
+ // Add more validation as needed
+
+ // Ensure at least 3 orderers in total for consenters
+ totalOrderers := 0
+ for _, count := range r.Config.OrdererCounts {
+ totalOrderers += count
+ }
+ if totalOrderers < 3 {
+ return fmt.Errorf("at least 3 orderers are required in total for consenters (got %d)", totalOrderers)
+ }
+
+ return nil
+}
+
+// Run executes the Fabric testnet creation logic
+func (r *FabricTestnetRunner) Run() error {
+ if err := r.Validate(); err != nil {
+ return err
+ }
+
+ client, err := common.NewClientFromEnv()
+ if err != nil {
+ return fmt.Errorf("failed to create API client: %w", err)
+ }
+
+ // 1. Create organizations
+ orgIDs := map[string]int64{}
+ orgNamesWithUUID := map[string]string{}
+ for _, org := range r.Config.PeerOrgs {
+ suffixedOrg := fmt.Sprintf("%s-%s", org, generateShortUUID())
+ orgReq := fabrictypes.CreateOrganizationRequest{Name: suffixedOrg, MspID: org, ProviderID: 1}
+ resp, err := client.CreateOrganization(orgReq)
+ if err != nil {
+ return fmt.Errorf("failed to create peer org %s: %w", org, err)
+ }
+ orgIDs[org] = resp.ID
+ orgNamesWithUUID[org] = suffixedOrg
+ }
+ for _, org := range r.Config.OrdererOrgs {
+ suffixedOrg := fmt.Sprintf("%s-%s", org, generateShortUUID())
+ orgReq := fabrictypes.CreateOrganizationRequest{Name: suffixedOrg, MspID: org, ProviderID: 1}
+ resp, err := client.CreateOrganization(orgReq)
+ if err != nil {
+ return fmt.Errorf("failed to create orderer org %s: %w", org, err)
+ }
+ orgIDs[org] = resp.ID
+ orgNamesWithUUID[org] = suffixedOrg
+ }
+
+ // 2. Create nodes for each org using common helpers
+ nodeIDs := []int64{}
+ peerNodeIDsByOrg := map[string][]int64{}
+ ordererNodeIDsByOrg := map[string][]int64{}
+ for org, count := range r.Config.PeerCounts {
+ orgID := orgIDs[org]
+ for i := 0; i < count; i++ {
+ nodeName := fmt.Sprintf("%s-peer-%s", r.Config.Name, generateShortUUID())
+
+ // Allocate ports for peer node with error handling
+ listen, err := ports.GetFreePort("fabric-peer")
+ if err != nil {
+ return fmt.Errorf("failed to allocate listen port for peer %s: %w", nodeName, err)
+ }
+ chaincode, err := ports.GetFreePort("fabric-peer")
+ if err != nil {
+ return fmt.Errorf("failed to allocate chaincode port for peer %s: %w", nodeName, err)
+ }
+ events, err := ports.GetFreePort("fabric-peer")
+ if err != nil {
+ return fmt.Errorf("failed to allocate events port for peer %s: %w", nodeName, err)
+ }
+ operations, err := ports.GetFreePort("fabric-peer")
+ if err != nil {
+ return fmt.Errorf("failed to allocate operations port for peer %s: %w", nodeName, err)
+ }
+
+ // Determine external endpoint based on mode
+ externalIP := "127.0.0.1"
+ if r.Config.Mode == "docker" {
+ hostIP, err := getExternalIP()
+ if err == nil {
+ externalIP = hostIP
+ } else {
+ // fallback to 127.0.0.1 if error
+ }
+ }
+
+ peerConfig := &types.FabricPeerConfig{
+ Name: nodeName,
+ OrganizationID: orgID,
+ BaseNodeConfig: types.BaseNodeConfig{
+ Mode: r.Config.Mode,
+ },
+ MSPID: org,
+ ListenAddress: fmt.Sprintf("0.0.0.0:%d", listen.Port),
+ ChaincodeAddress: fmt.Sprintf("0.0.0.0:%d", chaincode.Port),
+ EventsAddress: fmt.Sprintf("0.0.0.0:%d", events.Port),
+ OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operations.Port),
+ ExternalEndpoint: fmt.Sprintf("%s:%d", externalIP, listen.Port),
+ DomainNames: []string{externalIP},
+ Env: map[string]string{},
+ Version: "3.1.0",
+ AddressOverrides: []types.AddressOverride{},
+ OrdererAddressOverrides: []types.OrdererAddressOverride{},
+ }
+ nodeResp, err := client.CreatePeerNode(peerConfig)
+ if err != nil {
+ return fmt.Errorf("failed to create peer node for org %s: %w", org, err)
+ }
+ nodeIDs = append(nodeIDs, nodeResp.ID)
+ peerNodeIDsByOrg[org] = append(peerNodeIDsByOrg[org], nodeResp.ID)
+ }
+ }
+ for org, count := range r.Config.OrdererCounts {
+ orgID := orgIDs[org]
+ for i := 0; i < count; i++ {
+ nodeName := fmt.Sprintf("%s-orderer-%s", r.Config.Name, generateShortUUID())
+
+ // Allocate ports for orderer node with error handling
+ listen, err := ports.GetFreePort("fabric-orderer")
+ if err != nil {
+ return fmt.Errorf("failed to allocate listen port for orderer %s: %w", nodeName, err)
+ }
+ admin, err := ports.GetFreePort("fabric-orderer")
+ if err != nil {
+ return fmt.Errorf("failed to allocate admin port for orderer %s: %w", nodeName, err)
+ }
+ operations, err := ports.GetFreePort("fabric-orderer")
+ if err != nil {
+ return fmt.Errorf("failed to allocate operations port for orderer %s: %w", nodeName, err)
+ }
+
+ // Determine external endpoint based on mode
+ externalIP := "127.0.0.1"
+ if r.Config.Mode == "docker" {
+ hostIP, err := getExternalIP()
+ if err == nil {
+ externalIP = hostIP
+ } else {
+ // fallback to 127.0.0.1 if error
+ }
+ }
+
+ ordererConfig := &types.FabricOrdererConfig{
+ BaseNodeConfig: types.BaseNodeConfig{
+ Mode: r.Config.Mode,
+ },
+ Name: nodeName,
+ OrganizationID: orgID,
+ MSPID: org,
+ ListenAddress: fmt.Sprintf("0.0.0.0:%d", listen.Port),
+ AdminAddress: fmt.Sprintf("0.0.0.0:%d", admin.Port),
+ OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operations.Port),
+ ExternalEndpoint: fmt.Sprintf("%s:%d", externalIP, listen.Port),
+ DomainNames: []string{externalIP},
+ Env: map[string]string{},
+ Version: "3.1.0",
+ AddressOverrides: []types.AddressOverride{},
+ }
+ nodeResp, err := client.CreateOrdererNode(ordererConfig)
+ if err != nil {
+ return fmt.Errorf("failed to create orderer node for org %s: %w", org, err)
+ }
+ nodeIDs = append(nodeIDs, nodeResp.ID)
+ ordererNodeIDsByOrg[org] = append(ordererNodeIDsByOrg[org], nodeResp.ID)
+ }
+ }
+
+ // 3. Create the network/channels using the common helper
+ // Build the FabricNetworkConfig
+ peerOrgs := []networkshttp.OrganizationConfig{}
+ ordererOrgs := []networkshttp.OrganizationConfig{}
+ for _, org := range r.Config.PeerOrgs {
+ peerOrgs = append(peerOrgs, networkshttp.OrganizationConfig{
+ ID: orgIDs[org],
+ NodeIDs: peerNodeIDsByOrg[org],
+ })
+ }
+ for _, org := range r.Config.OrdererOrgs {
+ ordererOrgs = append(ordererOrgs, networkshttp.OrganizationConfig{
+ ID: orgIDs[org],
+ NodeIDs: ordererNodeIDsByOrg[org],
+ })
+ }
+ // Optionally, you can group nodeIDs by org if needed
+
+ netReq := &networkshttp.CreateFabricNetworkRequest{
+ Name: r.Config.Name,
+ Description: "",
+ Config: networkshttp.FabricNetworkConfig{
+ PeerOrganizations: peerOrgs,
+ OrdererOrganizations: ordererOrgs,
+ ExternalPeerOrgs: []networkshttp.ExternalOrgConfig{},
+ ExternalOrdererOrgs: []networkshttp.ExternalOrgConfig{},
+ },
+ }
+
+ networkResp, err := client.CreateFabricNetwork(netReq)
+ if err != nil {
+ return fmt.Errorf("failed to create fabric network: %w", err)
+ }
+
+ fmt.Printf("Fabric testnet created successfully! Network ID: %d\n", networkResp.ID)
+
+ // Join all peers to the network
+ peerResults, peerErrs := client.JoinAllPeersToFabricNetwork(networkResp.ID)
+ for _, resp := range peerResults {
+ fmt.Printf("Peer joined network %d successfully. Network ID: %d, Status: %s\n", networkResp.ID, resp.ID, resp.Status)
+ }
+ if len(peerErrs) > 0 {
+ fmt.Println("Errors occurred while joining some peers:")
+ for _, err := range peerErrs {
+ fmt.Printf(" %v\n", err)
+ }
+ // Optionally: return fmt.Errorf("some peers failed to join the network")
+ }
+
+ // Join all orderers to the network
+ ordererResults, ordererErrs := client.JoinAllOrderersToFabricNetwork(networkResp.ID)
+ for _, resp := range ordererResults {
+ fmt.Printf("Orderer joined network %d successfully. Network ID: %d, Status: %s\n", networkResp.ID, resp.ID, resp.Status)
+ }
+ if len(ordererErrs) > 0 {
+ fmt.Println("Errors occurred while joining some orderers:")
+ for _, err := range ordererErrs {
+ fmt.Printf(" %v\n", err)
+ }
+ // Optionally: return fmt.Errorf("some orderers failed to join the network")
+ }
+
+ return nil
+}
+
+// NewFabricTestnetCmd builds the `fabric` cobra subcommand that creates a
+// Fabric testnet. All work is delegated to FabricTestnetRunner.Run; on error
+// the command prints the error and exits the process with status 1.
+func NewFabricTestnetCmd() *cobra.Command {
+ runner := &FabricTestnetRunner{
+ Config: FabricTestnetConfig{},
+ }
+
+ cmd := &cobra.Command{
+ Use: "fabric",
+ Short: "Create a Fabric testnet",
+ Run: func(cmd *cobra.Command, args []string) {
+
+ if err := runner.Run(); err != nil {
+ fmt.Println("Error:", err)
+ os.Exit(1)
+ }
+ },
+ }
+
+ // NOTE(review): --name/--org are documented as required but are enforced in
+ // runner.Validate at run time rather than via cobra's MarkFlagRequired.
+ cmd.Flags().StringVar(&runner.Config.Name, "name", "", "Name of the testnet (required)")
+ cmd.Flags().IntVar(&runner.Config.Nodes, "nodes", 1, "Number of nodes (default 1)")
+ cmd.Flags().StringVar(&runner.Config.Org, "org", "", "Organization MSP ID (required)")
+ cmd.Flags().StringSliceVar(&runner.Config.PeerOrgs, "peerOrgs", nil, "List of peer organizations (comma-separated)")
+ cmd.Flags().StringSliceVar(&runner.Config.OrdererOrgs, "ordererOrgs", nil, "List of orderer organizations (comma-separated)")
+ cmd.Flags().StringSliceVar(&runner.Config.Channels, "channels", nil, "List of channels to create (comma-separated)")
+ cmd.Flags().StringToIntVar(&runner.Config.PeerCounts, "peerCounts", nil, "Number of peers per org (e.g., Org1=2,Org2=3)")
+ cmd.Flags().StringToIntVar(&runner.Config.OrdererCounts, "ordererCounts", nil, "Number of orderers per org (e.g., Orderer1=1,Orderer2=2)")
+ cmd.Flags().StringVar(&runner.Config.Mode, "mode", "service", "Node mode (default 'service')")
+
+ return cmd
+}
diff --git a/cmd/testnet/root.go b/cmd/testnet/root.go
new file mode 100644
index 0000000..fd87d89
--- /dev/null
+++ b/cmd/testnet/root.go
@@ -0,0 +1,22 @@
+package testnet
+
+import (
+ "github.com/spf13/cobra"
+ // Import subcommands for each network type
+ "github.com/chainlaunch/chainlaunch/cmd/testnet/besu"
+ "github.com/chainlaunch/chainlaunch/cmd/testnet/fabric"
+)
+
+// NewTestnetCmd returns the root testnet command.
+// It aggregates the per-network subcommands (currently `fabric` and `besu`)
+// under a single `testnet` entry point.
+func NewTestnetCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "testnet",
+ Short: "Manage testnets for different blockchain networks",
+ }
+
+ // Add subcommands for each network type
+ cmd.AddCommand(fabric.NewFabricTestnetCmd())
+ cmd.AddCommand(besu.NewBesuTestnetCmd())
+
+ return cmd
+}
diff --git a/docs/docs.go b/docs/docs.go
index 3339d06..b24afa1 100644
--- a/docs/docs.go
+++ b/docs/docs.go
@@ -1,4 +1,4 @@
-// Package docs Code generated by swaggo/swag at 2025-05-07 17:09:25.456654 +0200 CEST m=+3.810883834. DO NOT EDIT
+// Package docs Code generated by swaggo/swag at 2025-05-22 19:46:43.732497 +0200 CEST m=+4.027560959. DO NOT EDIT
package docs
import "github.com/swaggo/swag"
@@ -24,6 +24,541 @@ const docTemplate = `{
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
+ "/api/v1/metrics/deploy": {
+ "post": {
+ "description": "Deploys a new Prometheus instance with the specified configuration",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Deploy a new Prometheus instance",
+ "parameters": [
+ {
+ "description": "Prometheus deployment configuration",
+ "name": "request",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/metrics.DeployPrometheusRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/node/{id}": {
+ "get": {
+ "description": "Retrieves metrics for a specific node by ID and optional PromQL query",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Get metrics for a specific node",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "PromQL query to filter metrics",
+ "name": "query",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/node/{id}/label/{label}/values": {
+ "get": {
+ "description": "Retrieves all values for a specific label, optionally filtered by metric matches and node ID",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Get label values for a specific label",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Label name",
+ "name": "label",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "array",
+ "description": "Metric matches (e.g. {__name__=\\",
+ "name": "match",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Label values",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "400": {
+ "description": "Bad request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "500": {
+ "description": "Internal server error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/node/{id}/query": {
+ "post": {
+ "description": "Execute a custom Prometheus query with optional time range",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Execute custom Prometheus query",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Query parameters",
+ "name": "request",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/metrics.CustomQueryRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/common.QueryResult"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/node/{id}/range": {
+ "get": {
+ "description": "Retrieves metrics for a specific node within a specified time range",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Get metrics for a specific node with time range",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "PromQL query",
+ "name": "query",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Start time (RFC3339 format)",
+ "name": "start",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "End time (RFC3339 format)",
+ "name": "end",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Step duration (e.g. 1m, 5m, 1h)",
+ "name": "step",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Metrics data",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "400": {
+ "description": "Bad request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "500": {
+ "description": "Internal server error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/reload": {
+ "post": {
+ "description": "Triggers a reload of the Prometheus configuration to pick up any changes",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Reload Prometheus configuration",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/status": {
+ "get": {
+ "description": "Returns the current status of the Prometheus instance including version, port, and configuration",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Get Prometheus status",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_metrics_common.Status"
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/audit/logs": {
+ "get": {
+ "description": "Retrieves a paginated list of audit logs with optional filters",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "audit"
+ ],
+ "summary": "List audit logs",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "Page number (default: 1)",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "Page size (default: 10)",
+ "name": "page_size",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time (RFC3339 format)",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time (RFC3339 format)",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Filter by event type",
+ "name": "event_type",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Filter by user ID",
+ "name": "user_id",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/audit.ListLogsResponse"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "403": {
+ "description": "Forbidden",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/audit/logs/{id}": {
+ "get": {
+ "description": "Retrieves a specific audit log by ID",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "audit"
+ ],
+ "summary": "Get audit log",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Log ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/audit.Event"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "403": {
+ "description": "Forbidden",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "404": {
+ "description": "Not Found",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
"/auth/change-password": {
"post": {
"security": [
@@ -3704,6 +4239,66 @@ const docTemplate = `{
}
}
},
+ "/nodes/{id}/channels/{channelID}/chaincodes": {
+ "get": {
+ "description": "Retrieves all committed chaincodes for a specific channel on a Fabric peer node",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Nodes"
+ ],
+ "summary": "Get committed chaincodes for a Fabric peer",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Channel ID",
+ "name": "channelID",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/http.ChaincodeResponse"
+ }
+ }
+ },
+ "400": {
+ "description": "Validation error",
+ "schema": {
+ "$ref": "#/definitions/response.ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Node not found",
+ "schema": {
+ "$ref": "#/definitions/response.ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal server error",
+ "schema": {
+ "$ref": "#/definitions/response.ErrorResponse"
+ }
+ }
+ }
+ }
+ },
"/nodes/{id}/events": {
"get": {
"description": "Get a paginated list of events for a specific node",
@@ -4276,14 +4871,27 @@ const docTemplate = `{
"Organizations"
],
"summary": "List all Fabric organizations",
+ "parameters": [
+ {
+ "type": "integer",
+ "default": 20,
+ "description": "Maximum number of organizations to return",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 0,
+ "description": "Number of organizations to skip",
+ "name": "offset",
+ "in": "query"
+ }
+ ],
"responses": {
"200": {
"description": "OK",
"schema": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/handler.OrganizationResponse"
- }
+ "$ref": "#/definitions/handler.PaginatedOrganizationsResponse"
}
},
"500": {
@@ -5172,6 +5780,53 @@ const docTemplate = `{
}
}
},
+ "/plugins/{name}/resume": {
+ "post": {
+ "description": "Resume a previously deployed plugin",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Plugins"
+ ],
+ "summary": "Resume a plugin deployment",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Plugin name",
+ "name": "name",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "404": {
+ "description": "Not Found",
+ "schema": {
+ "$ref": "#/definitions/response.Response"
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "$ref": "#/definitions/response.Response"
+ }
+ }
+ }
+ }
+ },
"/plugins/{name}/services": {
"get": {
"description": "Get all services defined in the plugin's docker-compose configuration",
@@ -5760,6 +6415,96 @@ const docTemplate = `{
}
},
"definitions": {
+ "audit.Event": {
+ "type": "object",
+ "properties": {
+ "affectedResource": {
+ "type": "string"
+ },
+ "details": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "eventOutcome": {
+ "$ref": "#/definitions/audit.EventOutcome"
+ },
+ "eventSource": {
+ "type": "string"
+ },
+ "eventType": {
+ "type": "string"
+ },
+ "id": {
+ "type": "integer"
+ },
+ "requestId": {
+ "type": "string"
+ },
+ "sessionId": {
+ "type": "string"
+ },
+ "severity": {
+ "$ref": "#/definitions/audit.Severity"
+ },
+ "sourceIp": {
+ "type": "string"
+ },
+ "timestamp": {
+ "type": "string"
+ },
+ "userIdentity": {
+ "type": "integer"
+ }
+ }
+ },
+ "audit.EventOutcome": {
+ "type": "string",
+ "enum": [
+ "SUCCESS",
+ "FAILURE",
+ "PENDING"
+ ],
+ "x-enum-varnames": [
+ "EventOutcomeSuccess",
+ "EventOutcomeFailure",
+ "EventOutcomePending"
+ ]
+ },
+ "audit.ListLogsResponse": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/audit.Event"
+ }
+ },
+ "page": {
+ "type": "integer"
+ },
+ "page_size": {
+ "type": "integer"
+ },
+ "total_count": {
+ "type": "integer"
+ }
+ }
+ },
+ "audit.Severity": {
+ "type": "string",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "CRITICAL"
+ ],
+ "x-enum-varnames": [
+ "SeverityDebug",
+ "SeverityInfo",
+ "SeverityWarning",
+ "SeverityCritical"
+ ]
+ },
"auth.ChangePasswordRequest": {
"type": "object",
"required": [
@@ -6024,6 +6769,86 @@ const docTemplate = `{
"CHAINCODE_PACKAGE"
]
},
+ "common.QueryResult": {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "object",
+ "properties": {
+ "result": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "metric": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "value": {
+ "description": "For instant queries",
+ "type": "array",
+ "items": {}
+ },
+ "values": {
+ "description": "For range queries (matrix)",
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {}
+ }
+ }
+ }
+ }
+ },
+ "resultType": {
+ "type": "string"
+ }
+ }
+ },
+ "status": {
+ "type": "string"
+ }
+ }
+ },
+ "github_com_chainlaunch_chainlaunch_pkg_metrics_common.Status": {
+ "type": "object",
+ "properties": {
+ "deployment_mode": {
+ "description": "DeploymentMode is the current deployment mode",
+ "type": "string"
+ },
+ "error": {
+ "description": "Error is any error that occurred while getting the status",
+ "type": "string"
+ },
+ "port": {
+ "description": "Port is the port Prometheus is listening on",
+ "type": "integer"
+ },
+ "scrape_interval": {
+ "description": "ScrapeInterval is the current scrape interval",
+ "allOf": [
+ {
+ "$ref": "#/definitions/time.Duration"
+ }
+ ]
+ },
+ "started_at": {
+ "description": "StartedAt is when the instance was started",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status is the current status of the Prometheus instance (e.g. \"running\", \"stopped\", \"not_deployed\")",
+ "type": "string"
+ },
+ "version": {
+ "description": "Version is the version of Prometheus being used",
+ "type": "string"
+ }
+ }
+ },
"github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse": {
"type": "object",
"properties": {
@@ -6136,6 +6961,15 @@ const docTemplate = `{
"handler.OrganizationResponse": {
"type": "object",
"properties": {
+ "adminSignKeyId": {
+ "type": "integer"
+ },
+ "adminTlsKeyId": {
+ "type": "integer"
+ },
+ "clientSignKeyId": {
+ "type": "integer"
+ },
"createdAt": {
"type": "string"
},
@@ -6163,11 +6997,31 @@ const docTemplate = `{
"tlsCertificate": {
"type": "string"
},
- "tlsPublicKey": {
- "type": "string"
+ "tlsPublicKey": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "handler.PaginatedOrganizationsResponse": {
+ "type": "object",
+ "properties": {
+ "count": {
+ "type": "integer"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/handler.OrganizationResponse"
+ }
+ },
+ "limit": {
+ "type": "integer"
},
- "updatedAt": {
- "type": "string"
+ "offset": {
+ "type": "integer"
}
}
},
@@ -6500,6 +7354,29 @@ const docTemplate = `{
}
}
},
+ "http.ChaincodeResponse": {
+ "type": "object",
+ "properties": {
+ "endorsementPlugin": {
+ "type": "string"
+ },
+ "initRequired": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "sequence": {
+ "type": "integer"
+ },
+ "validationPlugin": {
+ "type": "string"
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ },
"http.ChannelConfigResponse": {
"type": "object",
"properties": {
@@ -7562,6 +8439,12 @@ const docTemplate = `{
"internalIp": {
"type": "string"
},
+ "metricsEnabled": {
+ "type": "boolean"
+ },
+ "metricsPort": {
+ "type": "integer"
+ },
"networkId": {
"type": "integer"
},
@@ -7846,6 +8729,49 @@ const docTemplate = `{
}
}
},
+ "metrics.CustomQueryRequest": {
+ "type": "object",
+ "required": [
+ "query"
+ ],
+ "properties": {
+ "end": {
+ "type": "string"
+ },
+ "query": {
+ "type": "string"
+ },
+ "start": {
+ "type": "string"
+ },
+ "step": {
+ "type": "string"
+ }
+ }
+ },
+ "metrics.DeployPrometheusRequest": {
+ "type": "object",
+ "required": [
+ "deployment_mode",
+ "prometheus_port",
+ "prometheus_version",
+ "scrape_interval"
+ ],
+ "properties": {
+ "deployment_mode": {
+ "type": "string"
+ },
+ "prometheus_port": {
+ "type": "integer"
+ },
+ "prometheus_version": {
+ "type": "string"
+ },
+ "scrape_interval": {
+ "type": "integer"
+ }
+ }
+ },
"models.CertificateRequest": {
"type": "object",
"required": [
@@ -8303,14 +9229,7 @@ const docTemplate = `{
"response.ErrorResponse": {
"type": "object",
"properties": {
- "details": {
- "type": "object",
- "additionalProperties": true
- },
- "message": {
- "type": "string"
- },
- "type": {
+ "error": {
"type": "string"
}
}
@@ -8319,11 +9238,8 @@ const docTemplate = `{
"type": "object",
"properties": {
"data": {},
- "error": {
- "$ref": "#/definitions/response.ErrorResponse"
- },
- "success": {
- "type": "boolean"
+ "message": {
+ "type": "string"
}
}
},
@@ -8342,6 +9258,19 @@ const docTemplate = `{
"internalIp": {
"type": "string"
},
+ "metricsEnabled": {
+ "description": "Metrics configuration",
+ "type": "boolean"
+ },
+ "metricsHost": {
+ "type": "string"
+ },
+ "metricsPort": {
+ "type": "integer"
+ },
+ "metricsProtocol": {
+ "type": "string"
+ },
"mode": {
"$ref": "#/definitions/service.Mode"
},
@@ -8380,6 +9309,19 @@ const docTemplate = `{
"keyId": {
"type": "integer"
},
+ "metricsEnabled": {
+ "description": "Metrics configuration",
+ "type": "boolean"
+ },
+ "metricsHost": {
+ "type": "string"
+ },
+ "metricsPort": {
+ "type": "integer"
+ },
+ "metricsProtocol": {
+ "type": "string"
+ },
"mode": {
"type": "string"
},
@@ -8555,7 +9497,7 @@ const docTemplate = `{
"type": "integer"
},
"node": {
- "$ref": "#/definitions/service.Node"
+ "$ref": "#/definitions/service.NodeResponse"
},
"nodeId": {
"type": "integer"
@@ -8571,84 +9513,86 @@ const docTemplate = `{
}
}
},
- "service.Node": {
+ "service.NodeDefaults": {
"type": "object",
"properties": {
- "createdAt": {
+ "adminAddress": {
"type": "string"
},
- "deploymentConfig": {
- "description": "Node deployment configuration interface that can be one of: FabricPeerDeploymentConfig, FabricOrdererDeploymentConfig, or BesuNodeDeploymentConfig"
- },
- "endpoint": {
+ "chaincodeAddress": {
"type": "string"
},
- "errorMessage": {
+ "containerName": {
"type": "string"
},
- "id": {
- "type": "integer"
+ "errorLogPath": {
+ "type": "string"
},
- "mspId": {
+ "eventsAddress": {
"type": "string"
},
- "name": {
+ "externalEndpoint": {
"type": "string"
},
- "nodeConfig": {
- "description": "Base interface for all node configurations"
+ "listenAddress": {
+ "type": "string"
},
- "nodeType": {
- "$ref": "#/definitions/types.NodeType"
+ "logPath": {
+ "type": "string"
},
- "platform": {
- "$ref": "#/definitions/types.BlockchainPlatform"
+ "mode": {
+ "$ref": "#/definitions/service.Mode"
},
- "publicEndpoint": {
+ "operationsListenAddress": {
"type": "string"
},
- "status": {
- "$ref": "#/definitions/types.NodeStatus"
- },
- "updatedAt": {
+ "serviceName": {
"type": "string"
}
}
},
- "service.NodeDefaults": {
+ "service.NodeResponse": {
"type": "object",
"properties": {
- "adminAddress": {
- "type": "string"
+ "besuNode": {
+ "$ref": "#/definitions/service.BesuNodeProperties"
},
- "chaincodeAddress": {
+ "createdAt": {
"type": "string"
},
- "containerName": {
+ "endpoint": {
"type": "string"
},
- "errorLogPath": {
+ "errorMessage": {
"type": "string"
},
- "eventsAddress": {
- "type": "string"
+ "fabricOrderer": {
+ "$ref": "#/definitions/service.FabricOrdererProperties"
},
- "externalEndpoint": {
- "type": "string"
+ "fabricPeer": {
+ "description": "Type-specific fields",
+ "allOf": [
+ {
+ "$ref": "#/definitions/service.FabricPeerProperties"
+ }
+ ]
},
- "listenAddress": {
- "type": "string"
+ "id": {
+ "type": "integer"
},
- "logPath": {
+ "name": {
"type": "string"
},
- "mode": {
- "$ref": "#/definitions/service.Mode"
+ "nodeType": {
+ "$ref": "#/definitions/types.NodeType"
},
- "operationsListenAddress": {
+ "platform": {
"type": "string"
},
- "serviceName": {
+ "status": {
+ "type": "string"
+ },
+ "updatedAt": {
"type": "string"
}
}
@@ -8707,6 +9651,29 @@ const docTemplate = `{
}
}
},
+ "time.Duration": {
+ "type": "integer",
+ "enum": [
+ -9223372036854775808,
+ 9223372036854775807,
+ 1,
+ 1000,
+ 1000000,
+ 1000000000,
+ 60000000000,
+ 3600000000000
+ ],
+ "x-enum-varnames": [
+ "minDuration",
+ "maxDuration",
+ "Nanosecond",
+ "Microsecond",
+ "Millisecond",
+ "Second",
+ "Minute",
+ "Hour"
+ ]
+ },
"types.AddressOverride": {
"type": "object",
"properties": {
@@ -8755,6 +9722,15 @@ const docTemplate = `{
"keyId": {
"type": "integer"
},
+ "metricsEnabled": {
+ "type": "boolean"
+ },
+ "metricsPort": {
+ "type": "integer"
+ },
+ "metricsProtocol": {
+ "type": "string"
+ },
"mode": {
"description": "@Description The deployment mode (service or docker)",
"type": "string",
@@ -8779,6 +9755,9 @@ const docTemplate = `{
"description": "@Description The type of node (fabric-peer, fabric-orderer, besu)",
"type": "string",
"example": "fabric-peer"
+ },
+ "version": {
+ "type": "string"
}
}
},
@@ -8831,6 +9810,44 @@ const docTemplate = `{
}
}
},
+ "types.Documentation": {
+ "type": "object",
+ "properties": {
+ "examples": {
+ "description": "Examples contains example configurations and usage",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/types.Example"
+ }
+ },
+ "readme": {
+ "description": "README contains the main documentation for the plugin",
+ "type": "string"
+ },
+ "troubleshooting": {
+ "description": "Troubleshooting contains common issues and their solutions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/types.TroubleshootingItem"
+ }
+ }
+ }
+ },
+ "types.Example": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "parameters": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ }
+ },
"types.FabricOrdererConfig": {
"type": "object",
"required": [
@@ -8992,35 +10009,32 @@ const docTemplate = `{
"types.Metadata": {
"type": "object",
"properties": {
+ "author": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "license": {
+ "type": "string"
+ },
"name": {
"type": "string"
},
+ "repository": {
+ "type": "string"
+ },
+ "tags": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
"version": {
"type": "string"
}
}
},
- "types.NodeStatus": {
- "type": "string",
- "enum": [
- "PENDING",
- "RUNNING",
- "STOPPED",
- "STOPPING",
- "STARTING",
- "UPDATING",
- "ERROR"
- ],
- "x-enum-varnames": [
- "NodeStatusPending",
- "NodeStatusRunning",
- "NodeStatusStopped",
- "NodeStatusStopping",
- "NodeStatusStarting",
- "NodeStatusUpdating",
- "NodeStatusError"
- ]
- },
"types.NodeType": {
"type": "string",
"enum": [
@@ -9073,6 +10087,9 @@ const docTemplate = `{
},
"type": {
"type": "string"
+ },
+ "x-source": {
+ "$ref": "#/definitions/types.XSourceType"
}
}
},
@@ -9105,11 +10122,45 @@ const docTemplate = `{
"dockerCompose": {
"$ref": "#/definitions/types.DockerCompose"
},
+ "documentation": {
+ "$ref": "#/definitions/types.Documentation"
+ },
"parameters": {
"$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_plugin_types.Parameters"
}
}
},
+ "types.TroubleshootingItem": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "problem": {
+ "type": "string"
+ },
+ "solution": {
+ "type": "string"
+ }
+ }
+ },
+ "types.XSourceType": {
+ "type": "string",
+ "enum": [
+ "fabric-peer",
+ "key",
+ "fabric-org",
+ "fabric-network",
+ "fabric-key"
+ ],
+ "x-enum-varnames": [
+ "XSourceFabricPeer",
+ "XSourceKey",
+ "XSourceFabricOrg",
+ "XSourceFabricNetwork",
+ "XSourceFabricKey"
+ ]
+ },
"url.URL": {
"type": "object",
"properties": {
diff --git a/docs/swagger.json b/docs/swagger.json
index 0c20543..bbbace1 100644
--- a/docs/swagger.json
+++ b/docs/swagger.json
@@ -22,6 +22,541 @@
"host": "localhost:8100",
"basePath": "/api/v1",
"paths": {
+ "/api/v1/metrics/deploy": {
+ "post": {
+ "description": "Deploys a new Prometheus instance with the specified configuration",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Deploy a new Prometheus instance",
+ "parameters": [
+ {
+ "description": "Prometheus deployment configuration",
+ "name": "request",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/metrics.DeployPrometheusRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/node/{id}": {
+ "get": {
+ "description": "Retrieves metrics for a specific node by ID and optional PromQL query",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Get metrics for a specific node",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "PromQL query to filter metrics",
+ "name": "query",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/node/{id}/label/{label}/values": {
+ "get": {
+ "description": "Retrieves all values for a specific label, optionally filtered by metric matches and node ID",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Get label values for a specific label",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Label name",
+ "name": "label",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "array",
+                        "description": "Metric matches (e.g. {__name__=\"metric_name\"})",
+ "name": "match",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Label values",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "400": {
+ "description": "Bad request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "500": {
+ "description": "Internal server error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/node/{id}/query": {
+ "post": {
+ "description": "Execute a custom Prometheus query with optional time range",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Execute custom Prometheus query",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "description": "Query parameters",
+ "name": "request",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/metrics.CustomQueryRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/common.QueryResult"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/node/{id}/range": {
+ "get": {
+ "description": "Retrieves metrics for a specific node within a specified time range",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Get metrics for a specific node with time range",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "PromQL query",
+ "name": "query",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Start time (RFC3339 format)",
+ "name": "start",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "End time (RFC3339 format)",
+ "name": "end",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Step duration (e.g. 1m, 5m, 1h)",
+ "name": "step",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Metrics data",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "400": {
+ "description": "Bad request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ },
+ "500": {
+ "description": "Internal server error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/reload": {
+ "post": {
+ "description": "Triggers a reload of the Prometheus configuration to pick up any changes",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Reload Prometheus configuration",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/metrics/status": {
+ "get": {
+ "description": "Returns the current status of the Prometheus instance including version, port, and configuration",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "metrics"
+ ],
+ "summary": "Get Prometheus status",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_metrics_common.Status"
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/audit/logs": {
+ "get": {
+ "description": "Retrieves a paginated list of audit logs with optional filters",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "audit"
+ ],
+ "summary": "List audit logs",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "Page number (default: 1)",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "Page size (default: 10)",
+ "name": "page_size",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time (RFC3339 format)",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time (RFC3339 format)",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Filter by event type",
+ "name": "event_type",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Filter by user ID",
+ "name": "user_id",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/audit.ListLogsResponse"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "403": {
+ "description": "Forbidden",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/audit/logs/{id}": {
+ "get": {
+ "description": "Retrieves a specific audit log by ID",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "audit"
+ ],
+ "summary": "Get audit log",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Log ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/audit.Event"
+ }
+ },
+ "400": {
+ "description": "Bad Request",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "403": {
+ "description": "Forbidden",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "404": {
+ "description": "Not Found",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
"/auth/change-password": {
"post": {
"security": [
@@ -3702,6 +4237,66 @@
}
}
},
+ "/nodes/{id}/channels/{channelID}/chaincodes": {
+ "get": {
+ "description": "Retrieves all committed chaincodes for a specific channel on a Fabric peer node",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Nodes"
+ ],
+ "summary": "Get committed chaincodes for a Fabric peer",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "Node ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Channel ID",
+ "name": "channelID",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/http.ChaincodeResponse"
+ }
+ }
+ },
+ "400": {
+ "description": "Validation error",
+ "schema": {
+ "$ref": "#/definitions/response.ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Node not found",
+ "schema": {
+ "$ref": "#/definitions/response.ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal server error",
+ "schema": {
+ "$ref": "#/definitions/response.ErrorResponse"
+ }
+ }
+ }
+ }
+ },
"/nodes/{id}/events": {
"get": {
"description": "Get a paginated list of events for a specific node",
@@ -4274,14 +4869,27 @@
"Organizations"
],
"summary": "List all Fabric organizations",
+ "parameters": [
+ {
+ "type": "integer",
+ "default": 20,
+ "description": "Maximum number of organizations to return",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 0,
+ "description": "Number of organizations to skip",
+ "name": "offset",
+ "in": "query"
+ }
+ ],
"responses": {
"200": {
"description": "OK",
"schema": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/handler.OrganizationResponse"
- }
+ "$ref": "#/definitions/handler.PaginatedOrganizationsResponse"
}
},
"500": {
@@ -5170,6 +5778,53 @@
}
}
},
+ "/plugins/{name}/resume": {
+ "post": {
+ "description": "Resume a previously deployed plugin",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Plugins"
+ ],
+ "summary": "Resume a plugin deployment",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Plugin name",
+ "name": "name",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "404": {
+ "description": "Not Found",
+ "schema": {
+ "$ref": "#/definitions/response.Response"
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "$ref": "#/definitions/response.Response"
+ }
+ }
+ }
+ }
+ },
"/plugins/{name}/services": {
"get": {
"description": "Get all services defined in the plugin's docker-compose configuration",
@@ -5758,6 +6413,96 @@
}
},
"definitions": {
+ "audit.Event": {
+ "type": "object",
+ "properties": {
+ "affectedResource": {
+ "type": "string"
+ },
+ "details": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "eventOutcome": {
+ "$ref": "#/definitions/audit.EventOutcome"
+ },
+ "eventSource": {
+ "type": "string"
+ },
+ "eventType": {
+ "type": "string"
+ },
+ "id": {
+ "type": "integer"
+ },
+ "requestId": {
+ "type": "string"
+ },
+ "sessionId": {
+ "type": "string"
+ },
+ "severity": {
+ "$ref": "#/definitions/audit.Severity"
+ },
+ "sourceIp": {
+ "type": "string"
+ },
+ "timestamp": {
+ "type": "string"
+ },
+ "userIdentity": {
+ "type": "integer"
+ }
+ }
+ },
+ "audit.EventOutcome": {
+ "type": "string",
+ "enum": [
+ "SUCCESS",
+ "FAILURE",
+ "PENDING"
+ ],
+ "x-enum-varnames": [
+ "EventOutcomeSuccess",
+ "EventOutcomeFailure",
+ "EventOutcomePending"
+ ]
+ },
+ "audit.ListLogsResponse": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/audit.Event"
+ }
+ },
+ "page": {
+ "type": "integer"
+ },
+ "page_size": {
+ "type": "integer"
+ },
+ "total_count": {
+ "type": "integer"
+ }
+ }
+ },
+ "audit.Severity": {
+ "type": "string",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "CRITICAL"
+ ],
+ "x-enum-varnames": [
+ "SeverityDebug",
+ "SeverityInfo",
+ "SeverityWarning",
+ "SeverityCritical"
+ ]
+ },
"auth.ChangePasswordRequest": {
"type": "object",
"required": [
@@ -6022,6 +6767,86 @@
"CHAINCODE_PACKAGE"
]
},
+ "common.QueryResult": {
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "object",
+ "properties": {
+ "result": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "metric": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "value": {
+ "description": "For instant queries",
+ "type": "array",
+ "items": {}
+ },
+ "values": {
+ "description": "For range queries (matrix)",
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {}
+ }
+ }
+ }
+ }
+ },
+ "resultType": {
+ "type": "string"
+ }
+ }
+ },
+ "status": {
+ "type": "string"
+ }
+ }
+ },
+ "github_com_chainlaunch_chainlaunch_pkg_metrics_common.Status": {
+ "type": "object",
+ "properties": {
+ "deployment_mode": {
+ "description": "DeploymentMode is the current deployment mode",
+ "type": "string"
+ },
+ "error": {
+ "description": "Error is any error that occurred while getting the status",
+ "type": "string"
+ },
+ "port": {
+ "description": "Port is the port Prometheus is listening on",
+ "type": "integer"
+ },
+ "scrape_interval": {
+ "description": "ScrapeInterval is the current scrape interval",
+ "allOf": [
+ {
+ "$ref": "#/definitions/time.Duration"
+ }
+ ]
+ },
+ "started_at": {
+ "description": "StartedAt is when the instance was started",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status is the current status of the Prometheus instance (e.g. \"running\", \"stopped\", \"not_deployed\")",
+ "type": "string"
+ },
+ "version": {
+ "description": "Version is the version of Prometheus being used",
+ "type": "string"
+ }
+ }
+ },
"github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse": {
"type": "object",
"properties": {
@@ -6134,6 +6959,15 @@
"handler.OrganizationResponse": {
"type": "object",
"properties": {
+ "adminSignKeyId": {
+ "type": "integer"
+ },
+ "adminTlsKeyId": {
+ "type": "integer"
+ },
+ "clientSignKeyId": {
+ "type": "integer"
+ },
"createdAt": {
"type": "string"
},
@@ -6161,11 +6995,31 @@
"tlsCertificate": {
"type": "string"
},
- "tlsPublicKey": {
- "type": "string"
+ "tlsPublicKey": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
+ "handler.PaginatedOrganizationsResponse": {
+ "type": "object",
+ "properties": {
+ "count": {
+ "type": "integer"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/handler.OrganizationResponse"
+ }
+ },
+ "limit": {
+ "type": "integer"
},
- "updatedAt": {
- "type": "string"
+ "offset": {
+ "type": "integer"
}
}
},
@@ -6498,6 +7352,29 @@
}
}
},
+ "http.ChaincodeResponse": {
+ "type": "object",
+ "properties": {
+ "endorsementPlugin": {
+ "type": "string"
+ },
+ "initRequired": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "sequence": {
+ "type": "integer"
+ },
+ "validationPlugin": {
+ "type": "string"
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ },
"http.ChannelConfigResponse": {
"type": "object",
"properties": {
@@ -7560,6 +8437,12 @@
"internalIp": {
"type": "string"
},
+ "metricsEnabled": {
+ "type": "boolean"
+ },
+ "metricsPort": {
+ "type": "integer"
+ },
"networkId": {
"type": "integer"
},
@@ -7844,6 +8727,49 @@
}
}
},
+ "metrics.CustomQueryRequest": {
+ "type": "object",
+ "required": [
+ "query"
+ ],
+ "properties": {
+ "end": {
+ "type": "string"
+ },
+ "query": {
+ "type": "string"
+ },
+ "start": {
+ "type": "string"
+ },
+ "step": {
+ "type": "string"
+ }
+ }
+ },
+ "metrics.DeployPrometheusRequest": {
+ "type": "object",
+ "required": [
+ "deployment_mode",
+ "prometheus_port",
+ "prometheus_version",
+ "scrape_interval"
+ ],
+ "properties": {
+ "deployment_mode": {
+ "type": "string"
+ },
+ "prometheus_port": {
+ "type": "integer"
+ },
+ "prometheus_version": {
+ "type": "string"
+ },
+ "scrape_interval": {
+ "type": "integer"
+ }
+ }
+ },
"models.CertificateRequest": {
"type": "object",
"required": [
@@ -8301,14 +9227,7 @@
"response.ErrorResponse": {
"type": "object",
"properties": {
- "details": {
- "type": "object",
- "additionalProperties": true
- },
- "message": {
- "type": "string"
- },
- "type": {
+ "error": {
"type": "string"
}
}
@@ -8317,11 +9236,8 @@
"type": "object",
"properties": {
"data": {},
- "error": {
- "$ref": "#/definitions/response.ErrorResponse"
- },
- "success": {
- "type": "boolean"
+ "message": {
+ "type": "string"
}
}
},
@@ -8340,6 +9256,19 @@
"internalIp": {
"type": "string"
},
+ "metricsEnabled": {
+ "description": "Metrics configuration",
+ "type": "boolean"
+ },
+ "metricsHost": {
+ "type": "string"
+ },
+ "metricsPort": {
+ "type": "integer"
+ },
+ "metricsProtocol": {
+ "type": "string"
+ },
"mode": {
"$ref": "#/definitions/service.Mode"
},
@@ -8378,6 +9307,19 @@
"keyId": {
"type": "integer"
},
+ "metricsEnabled": {
+ "description": "Metrics configuration",
+ "type": "boolean"
+ },
+ "metricsHost": {
+ "type": "string"
+ },
+ "metricsPort": {
+ "type": "integer"
+ },
+ "metricsProtocol": {
+ "type": "string"
+ },
"mode": {
"type": "string"
},
@@ -8553,7 +9495,7 @@
"type": "integer"
},
"node": {
- "$ref": "#/definitions/service.Node"
+ "$ref": "#/definitions/service.NodeResponse"
},
"nodeId": {
"type": "integer"
@@ -8569,84 +9511,86 @@
}
}
},
- "service.Node": {
+ "service.NodeDefaults": {
"type": "object",
"properties": {
- "createdAt": {
+ "adminAddress": {
"type": "string"
},
- "deploymentConfig": {
- "description": "Node deployment configuration interface that can be one of: FabricPeerDeploymentConfig, FabricOrdererDeploymentConfig, or BesuNodeDeploymentConfig"
- },
- "endpoint": {
+ "chaincodeAddress": {
"type": "string"
},
- "errorMessage": {
+ "containerName": {
"type": "string"
},
- "id": {
- "type": "integer"
+ "errorLogPath": {
+ "type": "string"
},
- "mspId": {
+ "eventsAddress": {
"type": "string"
},
- "name": {
+ "externalEndpoint": {
"type": "string"
},
- "nodeConfig": {
- "description": "Base interface for all node configurations"
+ "listenAddress": {
+ "type": "string"
},
- "nodeType": {
- "$ref": "#/definitions/types.NodeType"
+ "logPath": {
+ "type": "string"
},
- "platform": {
- "$ref": "#/definitions/types.BlockchainPlatform"
+ "mode": {
+ "$ref": "#/definitions/service.Mode"
},
- "publicEndpoint": {
+ "operationsListenAddress": {
"type": "string"
},
- "status": {
- "$ref": "#/definitions/types.NodeStatus"
- },
- "updatedAt": {
+ "serviceName": {
"type": "string"
}
}
},
- "service.NodeDefaults": {
+ "service.NodeResponse": {
"type": "object",
"properties": {
- "adminAddress": {
- "type": "string"
+ "besuNode": {
+ "$ref": "#/definitions/service.BesuNodeProperties"
},
- "chaincodeAddress": {
+ "createdAt": {
"type": "string"
},
- "containerName": {
+ "endpoint": {
"type": "string"
},
- "errorLogPath": {
+ "errorMessage": {
"type": "string"
},
- "eventsAddress": {
- "type": "string"
+ "fabricOrderer": {
+ "$ref": "#/definitions/service.FabricOrdererProperties"
},
- "externalEndpoint": {
- "type": "string"
+ "fabricPeer": {
+ "description": "Type-specific fields",
+ "allOf": [
+ {
+ "$ref": "#/definitions/service.FabricPeerProperties"
+ }
+ ]
},
- "listenAddress": {
- "type": "string"
+ "id": {
+ "type": "integer"
},
- "logPath": {
+ "name": {
"type": "string"
},
- "mode": {
- "$ref": "#/definitions/service.Mode"
+ "nodeType": {
+ "$ref": "#/definitions/types.NodeType"
},
- "operationsListenAddress": {
+ "platform": {
"type": "string"
},
- "serviceName": {
+ "status": {
+ "type": "string"
+ },
+ "updatedAt": {
"type": "string"
}
}
@@ -8705,6 +9649,29 @@
}
}
},
+ "time.Duration": {
+ "type": "integer",
+ "enum": [
+ -9223372036854775808,
+ 9223372036854775807,
+ 1,
+ 1000,
+ 1000000,
+ 1000000000,
+ 60000000000,
+ 3600000000000
+ ],
+ "x-enum-varnames": [
+ "minDuration",
+ "maxDuration",
+ "Nanosecond",
+ "Microsecond",
+ "Millisecond",
+ "Second",
+ "Minute",
+ "Hour"
+ ]
+ },
"types.AddressOverride": {
"type": "object",
"properties": {
@@ -8753,6 +9720,15 @@
"keyId": {
"type": "integer"
},
+ "metricsEnabled": {
+ "type": "boolean"
+ },
+ "metricsPort": {
+ "type": "integer"
+ },
+ "metricsProtocol": {
+ "type": "string"
+ },
"mode": {
"description": "@Description The deployment mode (service or docker)",
"type": "string",
@@ -8777,6 +9753,9 @@
"description": "@Description The type of node (fabric-peer, fabric-orderer, besu)",
"type": "string",
"example": "fabric-peer"
+ },
+ "version": {
+ "type": "string"
}
}
},
@@ -8829,6 +9808,44 @@
}
}
},
+ "types.Documentation": {
+ "type": "object",
+ "properties": {
+ "examples": {
+ "description": "Examples contains example configurations and usage",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/types.Example"
+ }
+ },
+ "readme": {
+ "description": "README contains the main documentation for the plugin",
+ "type": "string"
+ },
+ "troubleshooting": {
+ "description": "Troubleshooting contains common issues and their solutions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/types.TroubleshootingItem"
+ }
+ }
+ }
+ },
+ "types.Example": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "parameters": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ }
+ },
"types.FabricOrdererConfig": {
"type": "object",
"required": [
@@ -8990,35 +10007,32 @@
"types.Metadata": {
"type": "object",
"properties": {
+ "author": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "license": {
+ "type": "string"
+ },
"name": {
"type": "string"
},
+ "repository": {
+ "type": "string"
+ },
+ "tags": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
"version": {
"type": "string"
}
}
},
- "types.NodeStatus": {
- "type": "string",
- "enum": [
- "PENDING",
- "RUNNING",
- "STOPPED",
- "STOPPING",
- "STARTING",
- "UPDATING",
- "ERROR"
- ],
- "x-enum-varnames": [
- "NodeStatusPending",
- "NodeStatusRunning",
- "NodeStatusStopped",
- "NodeStatusStopping",
- "NodeStatusStarting",
- "NodeStatusUpdating",
- "NodeStatusError"
- ]
- },
"types.NodeType": {
"type": "string",
"enum": [
@@ -9071,6 +10085,9 @@
},
"type": {
"type": "string"
+ },
+ "x-source": {
+ "$ref": "#/definitions/types.XSourceType"
}
}
},
@@ -9103,11 +10120,45 @@
"dockerCompose": {
"$ref": "#/definitions/types.DockerCompose"
},
+ "documentation": {
+ "$ref": "#/definitions/types.Documentation"
+ },
"parameters": {
"$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_plugin_types.Parameters"
}
}
},
+ "types.TroubleshootingItem": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "problem": {
+ "type": "string"
+ },
+ "solution": {
+ "type": "string"
+ }
+ }
+ },
+ "types.XSourceType": {
+ "type": "string",
+ "enum": [
+ "fabric-peer",
+ "key",
+ "fabric-org",
+ "fabric-network",
+ "fabric-key"
+ ],
+ "x-enum-varnames": [
+ "XSourceFabricPeer",
+ "XSourceKey",
+ "XSourceFabricOrg",
+ "XSourceFabricNetwork",
+ "XSourceFabricKey"
+ ]
+ },
"url.URL": {
"type": "object",
"properties": {
diff --git a/docs/swagger.yaml b/docs/swagger.yaml
index 0bd44b1..518ca39 100644
--- a/docs/swagger.yaml
+++ b/docs/swagger.yaml
@@ -1,5 +1,68 @@
basePath: /api/v1
definitions:
+ audit.Event:
+ properties:
+ affectedResource:
+ type: string
+ details:
+ additionalProperties: true
+ type: object
+ eventOutcome:
+ $ref: '#/definitions/audit.EventOutcome'
+ eventSource:
+ type: string
+ eventType:
+ type: string
+ id:
+ type: integer
+ requestId:
+ type: string
+ sessionId:
+ type: string
+ severity:
+ $ref: '#/definitions/audit.Severity'
+ sourceIp:
+ type: string
+ timestamp:
+ type: string
+ userIdentity:
+ type: integer
+ type: object
+ audit.EventOutcome:
+ enum:
+ - SUCCESS
+ - FAILURE
+ - PENDING
+ type: string
+ x-enum-varnames:
+ - EventOutcomeSuccess
+ - EventOutcomeFailure
+ - EventOutcomePending
+ audit.ListLogsResponse:
+ properties:
+ items:
+ items:
+ $ref: '#/definitions/audit.Event'
+ type: array
+ page:
+ type: integer
+ page_size:
+ type: integer
+ total_count:
+ type: integer
+ type: object
+ audit.Severity:
+ enum:
+ - DEBUG
+ - INFO
+ - WARNING
+ - CRITICAL
+ type: string
+ x-enum-varnames:
+ - SeverityDebug
+ - SeverityInfo
+ - SeverityWarning
+ - SeverityCritical
auth.ChangePasswordRequest:
properties:
current_password:
@@ -177,6 +240,61 @@ definitions:
- ORDERER_TRANSACTION
- DELIVER_SEEK_INFO
- CHAINCODE_PACKAGE
+ common.QueryResult:
+ properties:
+ data:
+ properties:
+ result:
+ items:
+ properties:
+ metric:
+ additionalProperties:
+ type: string
+ type: object
+ value:
+ description: For instant queries
+ items: {}
+ type: array
+ values:
+ description: For range queries (matrix)
+ items:
+ items: {}
+ type: array
+ type: array
+ type: object
+ type: array
+ resultType:
+ type: string
+ type: object
+ status:
+ type: string
+ type: object
+ github_com_chainlaunch_chainlaunch_pkg_metrics_common.Status:
+ properties:
+ deployment_mode:
+ description: DeploymentMode is the current deployment mode
+ type: string
+ error:
+ description: Error is any error that occurred while getting the status
+ type: string
+ port:
+ description: Port is the port Prometheus is listening on
+ type: integer
+ scrape_interval:
+ allOf:
+ - $ref: '#/definitions/time.Duration'
+ description: ScrapeInterval is the current scrape interval
+ started_at:
+ description: StartedAt is when the instance was started
+ type: string
+ status:
+ description: Status is the current status of the Prometheus instance (e.g.
+ "running", "stopped", "not_deployed")
+ type: string
+ version:
+ description: Version is the version of Prometheus being used
+ type: string
+ type: object
github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse:
properties:
code:
@@ -250,6 +368,12 @@ definitions:
type: object
handler.OrganizationResponse:
properties:
+ adminSignKeyId:
+ type: integer
+ adminTlsKeyId:
+ type: integer
+ clientSignKeyId:
+ type: integer
createdAt:
type: string
description:
@@ -273,6 +397,19 @@ definitions:
updatedAt:
type: string
type: object
+ handler.PaginatedOrganizationsResponse:
+ properties:
+ count:
+ type: integer
+ items:
+ items:
+ $ref: '#/definitions/handler.OrganizationResponse'
+ type: array
+ limit:
+ type: integer
+ offset:
+ type: integer
+ type: object
handler.RevokeCertificateByPEMRequest:
properties:
certificate:
@@ -492,6 +629,21 @@ definitions:
previousBlockHash:
type: string
type: object
+ http.ChaincodeResponse:
+ properties:
+ endorsementPlugin:
+ type: string
+ initRequired:
+ type: boolean
+ name:
+ type: string
+ sequence:
+ type: integer
+ validationPlugin:
+ type: string
+ version:
+ type: string
+ type: object
http.ChannelConfigResponse:
properties:
config:
@@ -1284,6 +1436,10 @@ definitions:
type: string
internalIp:
type: string
+ metricsEnabled:
+ type: boolean
+ metricsPort:
+ type: integer
networkId:
type: integer
p2pHost:
@@ -1482,6 +1638,35 @@ definitions:
- name
- type
type: object
+ metrics.CustomQueryRequest:
+ properties:
+ end:
+ type: string
+ query:
+ type: string
+ start:
+ type: string
+ step:
+ type: string
+ required:
+ - query
+ type: object
+ metrics.DeployPrometheusRequest:
+ properties:
+ deployment_mode:
+ type: string
+ prometheus_port:
+ type: integer
+ prometheus_version:
+ type: string
+ scrape_interval:
+ type: integer
+ required:
+ - deployment_mode
+ - prometheus_port
+ - prometheus_version
+ - scrape_interval
+ type: object
models.CertificateRequest:
properties:
commonName:
@@ -1800,21 +1985,14 @@ definitions:
type: object
response.ErrorResponse:
properties:
- details:
- additionalProperties: true
- type: object
- message:
- type: string
- type:
+ error:
type: string
type: object
response.Response:
properties:
data: {}
- error:
- $ref: '#/definitions/response.ErrorResponse'
- success:
- type: boolean
+ message:
+ type: string
type: object
service.BesuNodeDefaults:
properties:
@@ -1826,6 +2004,15 @@ definitions:
type: string
internalIp:
type: string
+ metricsEnabled:
+ description: Metrics configuration
+ type: boolean
+ metricsHost:
+ type: string
+ metricsPort:
+ type: integer
+ metricsProtocol:
+ type: string
mode:
$ref: '#/definitions/service.Mode'
p2pHost:
@@ -1851,6 +2038,15 @@ definitions:
type: string
keyId:
type: integer
+ metricsEnabled:
+ description: Metrics configuration
+ type: boolean
+ metricsHost:
+ type: string
+ metricsPort:
+ type: integer
+ metricsProtocol:
+ type: string
mode:
type: string
networkId:
@@ -1969,7 +2165,7 @@ definitions:
networkId:
type: integer
node:
- $ref: '#/definitions/service.Node'
+ $ref: '#/definitions/service.NodeResponse'
nodeId:
type: integer
role:
@@ -1979,36 +2175,6 @@ definitions:
updatedAt:
type: string
type: object
- service.Node:
- properties:
- createdAt:
- type: string
- deploymentConfig:
- description: 'Node deployment configuration interface that can be one of:
- FabricPeerDeploymentConfig, FabricOrdererDeploymentConfig, or BesuNodeDeploymentConfig'
- endpoint:
- type: string
- errorMessage:
- type: string
- id:
- type: integer
- mspId:
- type: string
- name:
- type: string
- nodeConfig:
- description: Base interface for all node configurations
- nodeType:
- $ref: '#/definitions/types.NodeType'
- platform:
- $ref: '#/definitions/types.BlockchainPlatform'
- publicEndpoint:
- type: string
- status:
- $ref: '#/definitions/types.NodeStatus'
- updatedAt:
- type: string
- type: object
service.NodeDefaults:
properties:
adminAddress:
@@ -2034,6 +2200,35 @@ definitions:
serviceName:
type: string
type: object
+ service.NodeResponse:
+ properties:
+ besuNode:
+ $ref: '#/definitions/service.BesuNodeProperties'
+ createdAt:
+ type: string
+ endpoint:
+ type: string
+ errorMessage:
+ type: string
+ fabricOrderer:
+ $ref: '#/definitions/service.FabricOrdererProperties'
+ fabricPeer:
+ allOf:
+ - $ref: '#/definitions/service.FabricPeerProperties'
+ description: Type-specific fields
+ id:
+ type: integer
+ name:
+ type: string
+ nodeType:
+ $ref: '#/definitions/types.NodeType'
+ platform:
+ type: string
+ status:
+ type: string
+ updatedAt:
+ type: string
+ type: object
service.NodesDefaultsResult:
properties:
availableAddresses:
@@ -2069,6 +2264,26 @@ definitions:
peerTemplateCMD:
type: string
type: object
+ time.Duration:
+ enum:
+ - -9223372036854775808
+ - 9223372036854775807
+ - 1
+ - 1000
+ - 1000000
+ - 1000000000
+ - 60000000000
+ - 3600000000000
+ type: integer
+ x-enum-varnames:
+ - minDuration
+ - maxDuration
+ - Nanosecond
+ - Microsecond
+ - Millisecond
+ - Second
+ - Minute
+ - Hour
types.AddressOverride:
properties:
from:
@@ -2094,6 +2309,12 @@ definitions:
type: string
keyId:
type: integer
+ metricsEnabled:
+ type: boolean
+ metricsPort:
+ type: integer
+ metricsProtocol:
+ type: string
mode:
description: '@Description The deployment mode (service or docker)'
example: service
@@ -2113,6 +2334,8 @@ definitions:
besu)'
example: fabric-peer
type: string
+ version:
+ type: string
required:
- externalIp
- internalIp
@@ -2156,6 +2379,32 @@ definitions:
contents:
type: string
type: object
+ types.Documentation:
+ properties:
+ examples:
+ description: Examples contains example configurations and usage
+ items:
+ $ref: '#/definitions/types.Example'
+ type: array
+ readme:
+ description: README contains the main documentation for the plugin
+ type: string
+ troubleshooting:
+ description: Troubleshooting contains common issues and their solutions
+ items:
+ $ref: '#/definitions/types.TroubleshootingItem'
+ type: array
+ type: object
+ types.Example:
+ properties:
+ description:
+ type: string
+ name:
+ type: string
+ parameters:
+ additionalProperties: true
+ type: object
+ type: object
types.FabricOrdererConfig:
properties:
addressOverrides:
@@ -2277,29 +2526,23 @@ definitions:
type: object
types.Metadata:
properties:
+ author:
+ type: string
+ description:
+ type: string
+ license:
+ type: string
name:
type: string
+ repository:
+ type: string
+ tags:
+ items:
+ type: string
+ type: array
version:
type: string
type: object
- types.NodeStatus:
- enum:
- - PENDING
- - RUNNING
- - STOPPED
- - STOPPING
- - STARTING
- - UPDATING
- - ERROR
- type: string
- x-enum-varnames:
- - NodeStatusPending
- - NodeStatusRunning
- - NodeStatusStopped
- - NodeStatusStopping
- - NodeStatusStarting
- - NodeStatusUpdating
- - NodeStatusError
types.NodeType:
enum:
- FABRIC_PEER
@@ -2338,6 +2581,8 @@ definitions:
type: array
type:
type: string
+ x-source:
+ $ref: '#/definitions/types.XSourceType'
type: object
types.Service:
properties:
@@ -2358,116 +2603,502 @@ definitions:
properties:
dockerCompose:
$ref: '#/definitions/types.DockerCompose'
+ documentation:
+ $ref: '#/definitions/types.Documentation'
+ parameters:
+ $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_plugin_types.Parameters'
+ type: object
+ types.TroubleshootingItem:
+ properties:
+ description:
+ type: string
+ problem:
+ type: string
+ solution:
+ type: string
+ type: object
+ types.XSourceType:
+ enum:
+ - fabric-peer
+ - key
+ - fabric-org
+ - fabric-network
+ - fabric-key
+ type: string
+ x-enum-varnames:
+ - XSourceFabricPeer
+ - XSourceKey
+ - XSourceFabricOrg
+ - XSourceFabricNetwork
+ - XSourceFabricKey
+ url.URL:
+ properties:
+ forceQuery:
+ description: append a query ('?') even if RawQuery is empty
+ type: boolean
+ fragment:
+ description: fragment for references, without '#'
+ type: string
+ host:
+ description: host or host:port (see Hostname and Port methods)
+ type: string
+ omitHost:
+ description: do not emit empty host (authority)
+ type: boolean
+ opaque:
+ description: encoded opaque data
+ type: string
+ path:
+ description: path (relative paths may omit leading slash)
+ type: string
+ rawFragment:
+ description: encoded fragment hint (see EscapedFragment method)
+ type: string
+ rawPath:
+ description: encoded path hint (see EscapedPath method)
+ type: string
+ rawQuery:
+ description: encoded query values, without '?'
+ type: string
+ scheme:
+ type: string
+ user:
+ allOf:
+ - $ref: '#/definitions/url.Userinfo'
+ description: username and password information
+ type: object
+ url.Userinfo:
+ type: object
+ x509.ExtKeyUsage:
+ enum:
+ - 0
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - 6
+ - 7
+ - 8
+ - 9
+ - 10
+ - 11
+ - 12
+ - 13
+ type: integer
+ x-enum-varnames:
+ - ExtKeyUsageAny
+ - ExtKeyUsageServerAuth
+ - ExtKeyUsageClientAuth
+ - ExtKeyUsageCodeSigning
+ - ExtKeyUsageEmailProtection
+ - ExtKeyUsageIPSECEndSystem
+ - ExtKeyUsageIPSECTunnel
+ - ExtKeyUsageIPSECUser
+ - ExtKeyUsageTimeStamping
+ - ExtKeyUsageOCSPSigning
+ - ExtKeyUsageMicrosoftServerGatedCrypto
+ - ExtKeyUsageNetscapeServerGatedCrypto
+ - ExtKeyUsageMicrosoftCommercialCodeSigning
+ - ExtKeyUsageMicrosoftKernelCodeSigning
+ x509.KeyUsage:
+ enum:
+ - 1
+ - 2
+ - 4
+ - 8
+ - 16
+ - 32
+ - 64
+ - 128
+ - 256
+ type: integer
+ x-enum-varnames:
+ - KeyUsageDigitalSignature
+ - KeyUsageContentCommitment
+ - KeyUsageKeyEncipherment
+ - KeyUsageDataEncipherment
+ - KeyUsageKeyAgreement
+ - KeyUsageCertSign
+ - KeyUsageCRLSign
+ - KeyUsageEncipherOnly
+ - KeyUsageDecipherOnly
+host: localhost:8100
+info:
+ contact:
+ email: support@chainlaunch.dev
+ name: API Support
+ url: http://chainlaunch.dev/support
+ description: ChainLaunch API provides services for managing blockchain networks
+ and cryptographic keys
+ license:
+ name: Apache 2.0
+ url: http://www.apache.org/licenses/LICENSE-2.0.html
+ termsOfService: http://swagger.io/terms/
+ title: ChainLaunch API
+ version: "1.0"
+paths:
+ /api/v1/metrics/deploy:
+ post:
+ consumes:
+ - application/json
+ description: Deploys a new Prometheus instance with the specified configuration
+ parameters:
+ - description: Prometheus deployment configuration
+ in: body
+ name: request
+ required: true
+ schema:
+ $ref: '#/definitions/metrics.DeployPrometheusRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "400":
+ description: Bad Request
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "500":
+ description: Internal Server Error
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ summary: Deploy a new Prometheus instance
+ tags:
+ - metrics
+ /api/v1/metrics/node/{id}:
+ get:
+ description: Retrieves metrics for a specific node by ID and optional PromQL
+ query
+ parameters:
+ - description: Node ID
+ in: path
+ name: id
+ required: true
+ type: string
+ - description: PromQL query to filter metrics
+ in: query
+ name: query
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ additionalProperties: true
+ type: object
+ "400":
+ description: Bad Request
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "500":
+ description: Internal Server Error
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ summary: Get metrics for a specific node
+ tags:
+ - metrics
+ /api/v1/metrics/node/{id}/label/{label}/values:
+ get:
+ consumes:
+ - application/json
+ description: Retrieves all values for a specific label, optionally filtered
+ by metric matches and node ID
+ parameters:
+ - description: Node ID
+ in: path
+ name: id
+ required: true
+ type: string
+ - description: Label name
+ in: path
+ name: label
+ required: true
+ type: string
+ - description: Metric matches (e.g. {__name__=\
+ in: query
+ name: match
+ type: array
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: Label values
+ schema:
+ additionalProperties: true
+ type: object
+ "400":
+ description: Bad request
+ schema:
+ additionalProperties: true
+ type: object
+ "500":
+ description: Internal server error
+ schema:
+ additionalProperties: true
+ type: object
+ summary: Get label values for a specific label
+ tags:
+ - metrics
+ /api/v1/metrics/node/{id}/query:
+ post:
+ consumes:
+ - application/json
+ description: Execute a custom Prometheus query with optional time range
+ parameters:
+ - description: Node ID
+ in: path
+ name: id
+ required: true
+ type: string
+ - description: Query parameters
+ in: body
+ name: request
+ required: true
+ schema:
+ $ref: '#/definitions/metrics.CustomQueryRequest'
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/common.QueryResult'
+ "400":
+ description: Bad Request
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "500":
+ description: Internal Server Error
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ summary: Execute custom Prometheus query
+ tags:
+ - metrics
+ /api/v1/metrics/node/{id}/range:
+ get:
+ consumes:
+ - application/json
+ description: Retrieves metrics for a specific node within a specified time range
+ parameters:
+ - description: Node ID
+ in: path
+ name: id
+ required: true
+ type: string
+ - description: PromQL query
+ in: query
+ name: query
+ required: true
+ type: string
+ - description: Start time (RFC3339 format)
+ in: query
+ name: start
+ required: true
+ type: string
+ - description: End time (RFC3339 format)
+ in: query
+ name: end
+ required: true
+ type: string
+ - description: Step duration (e.g. 1m, 5m, 1h)
+ in: query
+ name: step
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: Metrics data
+ schema:
+ additionalProperties: true
+ type: object
+ "400":
+ description: Bad request
+ schema:
+ additionalProperties: true
+ type: object
+ "500":
+ description: Internal server error
+ schema:
+ additionalProperties: true
+ type: object
+ summary: Get metrics for a specific node with time range
+ tags:
+ - metrics
+ /api/v1/metrics/reload:
+ post:
+ description: Triggers a reload of the Prometheus configuration to pick up any
+ changes
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "500":
+ description: Internal Server Error
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ summary: Reload Prometheus configuration
+ tags:
+ - metrics
+ /api/v1/metrics/status:
+ get:
+ description: Returns the current status of the Prometheus instance including
+ version, port, and configuration
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_metrics_common.Status'
+ "500":
+ description: Internal Server Error
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ summary: Get Prometheus status
+ tags:
+ - metrics
+ /audit/logs:
+ get:
+ consumes:
+ - application/json
+ description: Retrieves a paginated list of audit logs with optional filters
parameters:
- $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_plugin_types.Parameters'
- type: object
- url.URL:
- properties:
- forceQuery:
- description: append a query ('?') even if RawQuery is empty
- type: boolean
- fragment:
- description: fragment for references, without '#'
- type: string
- host:
- description: host or host:port (see Hostname and Port methods)
- type: string
- omitHost:
- description: do not emit empty host (authority)
- type: boolean
- opaque:
- description: encoded opaque data
- type: string
- path:
- description: path (relative paths may omit leading slash)
+ - description: 'Page number (default: 1)'
+ in: query
+ name: page
+ type: integer
+ - description: 'Page size (default: 10)'
+ in: query
+ name: page_size
+ type: integer
+ - description: Start time (RFC3339 format)
+ in: query
+ name: start
type: string
- rawFragment:
- description: encoded fragment hint (see EscapedFragment method)
+ - description: End time (RFC3339 format)
+ in: query
+ name: end
type: string
- rawPath:
- description: encoded path hint (see EscapedPath method)
+ - description: Filter by event type
+ in: query
+ name: event_type
type: string
- rawQuery:
- description: encoded query values, without '?'
+ - description: Filter by user ID
+ in: query
+ name: user_id
type: string
- scheme:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/audit.ListLogsResponse'
+ "400":
+ description: Bad Request
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "401":
+ description: Unauthorized
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "403":
+ description: Forbidden
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "500":
+ description: Internal Server Error
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ summary: List audit logs
+ tags:
+ - audit
+ /audit/logs/{id}:
+ get:
+ description: Retrieves a specific audit log by ID
+ parameters:
+ - description: Log ID
+ in: path
+ name: id
+ required: true
type: string
- user:
- allOf:
- - $ref: '#/definitions/url.Userinfo'
- description: username and password information
- type: object
- url.Userinfo:
- type: object
- x509.ExtKeyUsage:
- enum:
- - 0
- - 1
- - 2
- - 3
- - 4
- - 5
- - 6
- - 7
- - 8
- - 9
- - 10
- - 11
- - 12
- - 13
- type: integer
- x-enum-varnames:
- - ExtKeyUsageAny
- - ExtKeyUsageServerAuth
- - ExtKeyUsageClientAuth
- - ExtKeyUsageCodeSigning
- - ExtKeyUsageEmailProtection
- - ExtKeyUsageIPSECEndSystem
- - ExtKeyUsageIPSECTunnel
- - ExtKeyUsageIPSECUser
- - ExtKeyUsageTimeStamping
- - ExtKeyUsageOCSPSigning
- - ExtKeyUsageMicrosoftServerGatedCrypto
- - ExtKeyUsageNetscapeServerGatedCrypto
- - ExtKeyUsageMicrosoftCommercialCodeSigning
- - ExtKeyUsageMicrosoftKernelCodeSigning
- x509.KeyUsage:
- enum:
- - 1
- - 2
- - 4
- - 8
- - 16
- - 32
- - 64
- - 128
- - 256
- type: integer
- x-enum-varnames:
- - KeyUsageDigitalSignature
- - KeyUsageContentCommitment
- - KeyUsageKeyEncipherment
- - KeyUsageDataEncipherment
- - KeyUsageKeyAgreement
- - KeyUsageCertSign
- - KeyUsageCRLSign
- - KeyUsageEncipherOnly
- - KeyUsageDecipherOnly
-host: localhost:8100
-info:
- contact:
- email: support@chainlaunch.dev
- name: API Support
- url: http://chainlaunch.dev/support
- description: ChainLaunch API provides services for managing blockchain networks
- and cryptographic keys
- license:
- name: Apache 2.0
- url: http://www.apache.org/licenses/LICENSE-2.0.html
- termsOfService: http://swagger.io/terms/
- title: ChainLaunch API
- version: "1.0"
-paths:
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/audit.Event'
+ "400":
+ description: Bad Request
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "401":
+ description: Unauthorized
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "403":
+ description: Forbidden
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "404":
+ description: Not Found
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "500":
+ description: Internal Server Error
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ summary: Get audit log
+ tags:
+ - audit
/auth/change-password:
post:
consumes:
@@ -4773,6 +5404,47 @@ paths:
summary: Get channels for a Fabric node
tags:
- Nodes
+ /nodes/{id}/channels/{channelID}/chaincodes:
+ get:
+ consumes:
+ - application/json
+ description: Retrieves all committed chaincodes for a specific channel on a
+ Fabric peer node
+ parameters:
+ - description: Node ID
+ in: path
+ name: id
+ required: true
+ type: integer
+ - description: Channel ID
+ in: path
+ name: channelID
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ items:
+ $ref: '#/definitions/http.ChaincodeResponse'
+ type: array
+ "400":
+ description: Validation error
+ schema:
+ $ref: '#/definitions/response.ErrorResponse'
+ "404":
+ description: Node not found
+ schema:
+ $ref: '#/definitions/response.ErrorResponse'
+ "500":
+ description: Internal server error
+ schema:
+ $ref: '#/definitions/response.ErrorResponse'
+ summary: Get committed chaincodes for a Fabric peer
+ tags:
+ - Nodes
/nodes/{id}/events:
get:
consumes:
@@ -5291,15 +5963,24 @@ paths:
consumes:
- application/json
description: Get a list of all Fabric organizations
+ parameters:
+ - default: 20
+ description: Maximum number of organizations to return
+ in: query
+ name: limit
+ type: integer
+ - default: 0
+ description: Number of organizations to skip
+ in: query
+ name: offset
+ type: integer
produces:
- application/json
responses:
"200":
description: OK
schema:
- items:
- $ref: '#/definitions/handler.OrganizationResponse'
- type: array
+ $ref: '#/definitions/handler.PaginatedOrganizationsResponse'
"500":
description: Internal Server Error
schema:
@@ -5891,6 +6572,37 @@ paths:
summary: Get detailed deployment status
tags:
- Plugins
+ /plugins/{name}/resume:
+ post:
+ consumes:
+ - application/json
+ description: Resume a previously deployed plugin
+ parameters:
+ - description: Plugin name
+ in: path
+ name: name
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ additionalProperties:
+ type: string
+ type: object
+ "404":
+ description: Not Found
+ schema:
+ $ref: '#/definitions/response.Response'
+ "500":
+ description: Internal Server Error
+ schema:
+ $ref: '#/definitions/response.Response'
+ summary: Resume a plugin deployment
+ tags:
+ - Plugins
/plugins/{name}/services:
get:
consumes:
diff --git a/e2e/api_test.go b/e2e/api_test.go
new file mode 100644
index 0000000..c953bf6
--- /dev/null
+++ b/e2e/api_test.go
@@ -0,0 +1,92 @@
+//go:build e2e
+// +build e2e
+
+package e2e
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/lithammer/shortuuid/v4"
+ "github.com/stretchr/testify/require"
+
+ "github.com/chainlaunch/chainlaunch/pkg/common/ports"
+ orgtypes "github.com/chainlaunch/chainlaunch/pkg/fabric/handler"
+ "github.com/chainlaunch/chainlaunch/pkg/logger"
+ nodeshttp "github.com/chainlaunch/chainlaunch/pkg/nodes/http"
+ nodetypes "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+)
+
+// TestCreateNode tests the node creation flow
+func TestCreateNode(t *testing.T) {
+ client, err := NewTestClient()
+ require.NoError(t, err)
+
+ // Generate random names
+ orgName := fmt.Sprintf("org-%s", shortuuid.New())
+ nodeName := fmt.Sprintf("node-%s", shortuuid.New())
+ peerName := fmt.Sprintf("peer-%s", shortuuid.New())
+ mspID := fmt.Sprintf("MSP-%s", shortuuid.New())
+
+ // Create a fabric organization
+ orgReq := &orgtypes.CreateOrganizationRequest{
+ Name: orgName,
+ MspID: mspID,
+ Description: fmt.Sprintf("Description for %s", orgName),
+ ProviderID: 1,
+ }
+ orgResp, err := client.CreateOrganization(orgReq)
+ require.NoError(t, err)
+ orgID := orgResp.ID
+ logger := logger.NewDefault()
+ logger.Info("Created organization", "id", orgID)
+ // Get free ports for the Fabric peer
+ peerPort, err := ports.GetFreePort("fabric-peer")
+ require.NoError(t, err)
+ defer ports.ReleasePort(peerPort.Port)
+
+ eventsPort, err := ports.GetFreePort("fabric-peer")
+ require.NoError(t, err)
+ defer ports.ReleasePort(eventsPort.Port)
+
+ operationsPort, err := ports.GetFreePort("fabric-peer")
+ require.NoError(t, err)
+ defer ports.ReleasePort(operationsPort.Port)
+
+ chaincodePort, err := ports.GetFreePort("fabric-peer")
+ require.NoError(t, err)
+ defer ports.ReleasePort(chaincodePort.Port)
+
+ // Create the Fabric peer node in the organization created above
+ fabricCreateReq := &nodeshttp.CreateNodeRequest{
+ Name: nodeName,
+ BlockchainPlatform: "FABRIC",
+ FabricPeer: &nodetypes.FabricPeerConfig{
+ BaseNodeConfig: nodetypes.BaseNodeConfig{
+ Type: "fabric-peer",
+ Mode: "docker",
+ },
+ Name: peerName,
+ MSPID: mspID,
+ OrganizationID: orgID,
+ ListenAddress: fmt.Sprintf("0.0.0.0:%d", peerPort.Port),
+ EventsAddress: fmt.Sprintf("0.0.0.0:%d", eventsPort.Port),
+ OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operationsPort.Port),
+ ExternalEndpoint: fmt.Sprintf("localhost:%d", peerPort.Port),
+ ChaincodeAddress: fmt.Sprintf("localhost:%d", chaincodePort.Port),
+ DomainNames: []string{},
+ Env: map[string]string{},
+ Version: "3.1.0",
+ OrdererAddressOverrides: []nodetypes.OrdererAddressOverride{},
+ AddressOverrides: []nodetypes.AddressOverride{},
+ },
+ }
+
+ fabricCreateResp, err := client.CreateNode(fabricCreateReq)
+ require.NoError(t, err)
+ fabricID := fabricCreateResp.ID
+ // Check node status
+ require.Equal(t, "RUNNING", fabricCreateResp.Status)
+ // Assert that the fabric node ID is not zero
+ require.NotZero(t, fabricID, "Fabric node ID should not be zero")
+}
diff --git a/e2e/client.go b/e2e/client.go
new file mode 100644
index 0000000..cbd4cb8
--- /dev/null
+++ b/e2e/client.go
@@ -0,0 +1,198 @@
+package e2e
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ nethttp "net/http"
+ "os"
+
+ orgtypes "github.com/chainlaunch/chainlaunch/pkg/fabric/handler"
+ networkshttp "github.com/chainlaunch/chainlaunch/pkg/networks/http"
+ nodeshttp "github.com/chainlaunch/chainlaunch/pkg/nodes/http"
+)
+
+const (
+ defaultAPIURL = "http://localhost:8080/api/v1"
+)
+
+// TestClient represents a test API client
+type TestClient struct {
+ baseURL string
+ username string
+ password string
+}
+
+// NewTestClient creates a new test client using environment variables
+func NewTestClient() (*TestClient, error) {
+ apiURL := os.Getenv("API_BASE_URL")
+ if apiURL == "" {
+ apiURL = defaultAPIURL
+ }
+
+ username := os.Getenv("API_USERNAME")
+ if username == "" {
+ return nil, fmt.Errorf("API_USERNAME environment variable is not set")
+ }
+
+ password := os.Getenv("API_PASSWORD")
+ if password == "" {
+ return nil, fmt.Errorf("API_PASSWORD environment variable is not set")
+ }
+
+ return &TestClient{
+ baseURL: apiURL,
+ username: username,
+ password: password,
+ }, nil
+}
+
+// DoRequest performs an HTTP request with authentication
+func (c *TestClient) DoRequest(method, path string, body interface{}) (*nethttp.Response, error) {
+ var reqBody io.Reader
+
+ if body != nil {
+ jsonBody, err := json.Marshal(body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal request body: %w", err)
+ }
+ reqBody = bytes.NewBuffer(jsonBody)
+ }
+
+ // Create HTTP request
+ req, err := nethttp.NewRequest(method, fmt.Sprintf("%s%s", c.baseURL, path), reqBody)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ req.SetBasicAuth(c.username, c.password)
+ if body != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+
+ // Send request
+ client := &nethttp.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to send request: %w", err)
+ }
+
+ return resp, nil
+}
+
+// CreateNode creates a new node
+func (c *TestClient) CreateNode(req *nodeshttp.CreateNodeRequest) (*nodeshttp.NodeResponse, error) {
+ resp, err := c.DoRequest(nethttp.MethodPost, "/nodes", req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != nethttp.StatusCreated {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
+ }
+
+ var nodeResp nodeshttp.NodeResponse
+ if err := json.NewDecoder(resp.Body).Decode(&nodeResp); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return &nodeResp, nil
+}
+
+// CreateOrganization creates a new fabric organization
+func (c *TestClient) CreateOrganization(req *orgtypes.CreateOrganizationRequest) (*orgtypes.OrganizationResponse, error) {
+ resp, err := c.DoRequest(nethttp.MethodPost, "/organizations", req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != nethttp.StatusCreated {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
+ }
+
+ var orgResp orgtypes.OrganizationResponse
+ if err := json.NewDecoder(resp.Body).Decode(&orgResp); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return &orgResp, nil
+}
+
+// AddNodeToNetwork adds a node to a network
+func (c *TestClient) AddNodeToNetwork(networkID int64, req *networkshttp.AddNodeToNetworkRequest) error {
+ resp, err := c.DoRequest(nethttp.MethodPost, fmt.Sprintf("/networks/%d/nodes", networkID), req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != nethttp.StatusCreated {
+ body, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
+ }
+
+ return nil
+}
+
+// GetNetwork retrieves a network by ID
+func (c *TestClient) GetNetwork(id int64) (*networkshttp.NetworkResponse, error) {
+ resp, err := c.DoRequest(nethttp.MethodGet, fmt.Sprintf("/networks/%d", id), nil)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != nethttp.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
+ }
+
+ var networkResp networkshttp.NetworkResponse
+ if err := json.NewDecoder(resp.Body).Decode(&networkResp); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return &networkResp, nil
+}
+
+// DeleteNetwork deletes a network by ID
+func (c *TestClient) DeleteNetwork(id int64) error {
+ resp, err := c.DoRequest(nethttp.MethodDelete, fmt.Sprintf("/networks/%d", id), nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != nethttp.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
+ }
+
+ return nil
+}
+
+// ListNetworks retrieves a list of networks
+func (c *TestClient) ListNetworks() (*networkshttp.ListNetworksResponse, error) {
+ resp, err := c.DoRequest(nethttp.MethodGet, "/networks", nil)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != nethttp.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
+ }
+
+ var listResp networkshttp.ListNetworksResponse
+ if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return &listResp, nil
+}
diff --git a/go.mod b/go.mod
index df430a8..bdc702b 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.23.8
require (
github.com/Masterminds/sprig/v3 v3.3.0
- github.com/docker/docker v28.1.1+incompatible
+ github.com/docker/docker v28.1.0+incompatible
github.com/docker/go-connections v0.5.0
github.com/ethereum/go-ethereum v1.15.1
github.com/go-chi/chi/v5 v5.2.0
@@ -13,7 +13,7 @@ require (
github.com/golang/protobuf v1.5.4
github.com/google/uuid v1.6.0
github.com/hyperledger/fabric-config v0.3.0
- github.com/libp2p/go-libp2p v0.27.8
+ github.com/libp2p/go-libp2p v0.27.8 // indirect
github.com/mattn/go-sqlite3 v1.14.24
github.com/pkg/errors v0.9.1
github.com/robfig/cron/v3 v3.0.1
@@ -35,6 +35,7 @@ require (
github.com/stretchr/testify v1.10.0
google.golang.org/grpc v1.72.0
google.golang.org/protobuf v1.36.6
+ gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -124,6 +125,7 @@ require (
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
+ github.com/hyperledger/fabric-protos-go v0.3.7 // indirect
github.com/in-toto/in-toto-golang v0.9.0 // indirect
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf // indirect
github.com/ipfs/boxo v0.12.0 // indirect
@@ -134,10 +136,11 @@ require (
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
+ github.com/lithammer/shortuuid/v4 v4.2.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
diff --git a/go.sum b/go.sum
index 0f46a2d..a95d5c4 100644
--- a/go.sum
+++ b/go.sum
@@ -112,7 +112,7 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/compose-spec/compose-go/v2 v2.6.1 h1:276YiQKRcGGtgkxiymzWHJ2CTv5joQA+7DTNrUA+rys=
github.com/compose-spec/compose-go/v2 v2.6.1/go.mod h1:vPlkN0i+0LjLf9rv52lodNMUTJF5YHVfHVGLLIP67NA=
-github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
@@ -175,8 +175,8 @@ github.com/docker/compose/v2 v2.35.1/go.mod h1:Ydd9ceg7VBOPSVAsDDKfyGGAkjejH3cD9
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I=
-github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.1.0+incompatible h1:4iqpcWQCt3Txcz7iWIb1U3SZ/n9ffo4U+ryY5/3eOp0=
+github.com/docker/docker v28.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -347,6 +347,8 @@ github.com/hyperledger/fabric-config v0.3.0 h1:FS5/dc9GAniljP6RYxQRG92AaiBVoN2vT
github.com/hyperledger/fabric-config v0.3.0/go.mod h1:kSevTn78K83Suc++JsEo7Nt1tYIPqDajW+ORz3OhWlg=
github.com/hyperledger/fabric-gateway v1.5.0 h1:JChlqtJNm2479Q8YWJ6k8wwzOiu2IRrV3K8ErsQmdTU=
github.com/hyperledger/fabric-gateway v1.5.0/go.mod h1:v13OkXAp7pKi4kh6P6epn27SyivRbljr8Gkfy8JlbtM=
+github.com/hyperledger/fabric-protos-go v0.3.7 h1:4Dp6esioyrbHaRZY8HcQG/ZN6ABPXcVEmGZWJlKc9mE=
+github.com/hyperledger/fabric-protos-go v0.3.7/go.mod h1:F+MmFQ9mnJzxB9Gus13XMoXrSJbIK/2QJOanEUZ5zoo=
github.com/hyperledger/fabric-protos-go-apiv2 v0.3.3 h1:Xpd6fzG/KjAOHJsq7EQXY2l+qi/y8muxBaY7R6QWABk=
github.com/hyperledger/fabric-protos-go-apiv2 v0.3.3/go.mod h1:2pq0ui6ZWA0cC8J+eCErgnMDCS1kPOEYVY+06ZAK0qE=
github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
@@ -391,10 +393,8 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
-github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
-github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
-github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
+github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -416,10 +416,10 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.26.3 h1:6g/psubqwdaBqNNoidbRKSTBEYgaOuKBhHl8Q5tO+PM=
-github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8=
github.com/libp2p/go-libp2p v0.27.8 h1:IX5x/4yKwyPQeVS2AXHZ3J4YATM9oHBGH1gBc23jBAI=
github.com/libp2p/go-libp2p v0.27.8/go.mod h1:eCFFtd0s5i/EVKR7+5Ki8bM7qwkNW3TPTTSSW9sz8NE=
+github.com/lithammer/shortuuid/v4 v4.2.0 h1:LMFOzVB3996a7b8aBuEXxqOBflbfPQAiVzkIcHO0h8c=
+github.com/lithammer/shortuuid/v4 v4.2.0/go.mod h1:D5noHZ2oFw/YaKCfGy0YxyE7M0wMbezmMjPdhyEFe6Y=
github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=
github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
@@ -509,8 +509,6 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
-github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ=
github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
@@ -729,8 +727,6 @@ go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.1 h1:ASgazW/qBmR+A32MYFDB6E2POoTgOwT509VP0CT/fjs=
go.uber.org/mock v0.5.1/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
-go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
@@ -810,10 +806,10 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
diff --git a/pkg/audit/audit.go b/pkg/audit/audit.go
new file mode 100644
index 0000000..ef24510
--- /dev/null
+++ b/pkg/audit/audit.go
@@ -0,0 +1,96 @@
+package audit
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/google/uuid"
+)
+
+// EventOutcome represents the result of an audited event
+type EventOutcome string
+
+const (
+	EventOutcomeSuccess EventOutcome = "SUCCESS" // completed successfully (2xx/3xx in the HTTP middleware)
+	EventOutcomeFailure EventOutcome = "FAILURE" // failed (4xx/5xx in the HTTP middleware)
+	EventOutcomePending EventOutcome = "PENDING" // outcome not yet determined
+)
+
+// Severity represents the importance level of an audit event
+type Severity string
+
+const (
+	SeverityDebug    Severity = "DEBUG"
+	SeverityInfo     Severity = "INFO"
+	SeverityWarning  Severity = "WARNING"
+	SeverityCritical Severity = "CRITICAL"
+)
+
+// Event represents an audit event to be logged
+//
+// ID is 0 until the event has been persisted; RequestID is assigned by the
+// producer (the HTTP middleware generates a fresh UUID per request).
+type Event struct {
+	ID               int64                  `json:"id"`
+	Timestamp        time.Time              `json:"timestamp"`
+	EventSource      string                 `json:"eventSource"` // producing subsystem, e.g. "http"
+	UserIdentity     int64                  `json:"userIdentity"` // numeric user ID; 0 when no user in context
+	SourceIP         string                 `json:"sourceIp"`
+	EventType        string                 `json:"eventType"` // e.g. "http_request"
+	EventOutcome     EventOutcome           `json:"eventOutcome"`
+	AffectedResource string                 `json:"affectedResource"` // "type" or "type:id" of the touched resource
+	RequestID        uuid.UUID              `json:"requestId"`
+	Severity         Severity               `json:"severity"`
+	Details          map[string]interface{} `json:"details"` // free-form structured context
+	SessionID        string                 `json:"sessionId"`
+}
+
+// Config holds the configuration for the audit service
+// NOTE(review): NewService does not currently consume Config — it hardcodes
+// a 1000-event buffer and a 5-worker default; confirm whether Config is
+// meant to be wired in.
+type Config struct {
+	// AsyncBufferSize is the size of the buffer for async logging
+	AsyncBufferSize int
+	// WorkerCount is the number of workers for async logging
+	WorkerCount int
+}
+
+// DefaultConfig returns the default configuration
+// (1000-event buffer, 5 workers — the same values NewService hardcodes).
+func DefaultConfig() Config {
+	return Config{
+		AsyncBufferSize: 1000,
+		WorkerCount:     5,
+	}
+}
+
+// NewEvent creates a new audit event with default values:
+// UTC timestamp, SUCCESS outcome, INFO severity, empty details map.
+func NewEvent() Event {
+	ev := Event{}
+	ev.Timestamp = time.Now().UTC()
+	ev.EventOutcome = EventOutcomeSuccess
+	ev.Severity = SeverityInfo
+	ev.Details = map[string]interface{}{}
+	return ev
+}
+
+// WithDetails adds details to the event
+func (ev Event) WithDetails(details map[string]interface{}) Event {
+	ev.Details = details
+	return ev
+}
+
+// WithSeverity sets the severity of the event
+func (ev Event) WithSeverity(severity Severity) Event {
+	ev.Severity = severity
+	return ev
+}
+
+// WithOutcome sets the outcome of the event
+func (ev Event) WithOutcome(outcome EventOutcome) Event {
+	ev.EventOutcome = outcome
+	return ev
+}
+
+// ToJSON converts the event to a JSON string
+func (ev Event) ToJSON() (string, error) {
+	raw, err := json.Marshal(ev)
+	if err != nil {
+		return "", err
+	}
+	return string(raw), nil
+}
diff --git a/pkg/audit/handler.go b/pkg/audit/handler.go
new file mode 100644
index 0000000..b5e4cc6
--- /dev/null
+++ b/pkg/audit/handler.go
@@ -0,0 +1,189 @@
+package audit
+
+import (
+ "encoding/json"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/auth"
+ "github.com/chainlaunch/chainlaunch/pkg/logger"
+ "github.com/go-chi/chi/v5"
+)
+
+// Handler handles HTTP requests for audit logs
+type Handler struct {
+	service *AuditService // query/persistence layer for audit events
+	logger  *logger.Logger // structured logger for handler-side failures
+}
+
+// NewHandler creates a new audit handler
+func NewHandler(service *AuditService, logger *logger.Logger) *Handler {
+	return &Handler{
+		service: service,
+		logger:  logger,
+	}
+}
+
+// RegisterRoutes registers the audit routes under /audit.
+// Both endpoints enforce an admin-role check inside the handler itself
+// rather than via route-level middleware.
+func (h *Handler) RegisterRoutes(r chi.Router) {
+	r.Route("/audit", func(r chi.Router) {
+		r.Get("/logs", h.ListLogs)
+		r.Get("/logs/{id}", h.GetLog)
+	})
+}
+
+// ListLogsRequest represents the request parameters for listing audit logs
+// NOTE(review): this struct is not referenced by ListLogs, which reads the
+// query string directly — it appears to exist for documentation only;
+// confirm whether it should be removed or used for binding.
+type ListLogsRequest struct {
+	Page     int       `json:"page"`
+	PageSize int       `json:"page_size"`
+	Start    time.Time `json:"start"`
+	End      time.Time `json:"end"`
+	EventType string   `json:"event_type"`
+	UserID   string    `json:"user_id"`
+}
+
+// ListLogsResponse represents the response for listing audit logs
+type ListLogsResponse struct {
+	Items      []Event `json:"items"`       // one page of audit events
+	TotalCount int     `json:"total_count"` // total rows matching the filter
+	Page       int     `json:"page"`        // 1-based page number echoed back
+	PageSize   int     `json:"page_size"`   // page size echoed back
+}
+
+// ListLogs retrieves a list of audit logs
+// @Summary List audit logs
+// @Description Retrieves a paginated list of audit logs with optional filters
+// @Tags audit
+// @Accept json
+// @Produce json
+// @Param page query int false "Page number (default: 1)"
+// @Param page_size query int false "Page size (default: 10)"
+// @Param start query string false "Start time (RFC3339 format)"
+// @Param end query string false "End time (RFC3339 format)"
+// @Param event_type query string false "Filter by event type"
+// @Param user_id query string false "Filter by user ID"
+// @Success 200 {object} ListLogsResponse
+// @Failure 400 {object} map[string]string
+// @Failure 401 {object} map[string]string
+// @Failure 403 {object} map[string]string
+// @Failure 500 {object} map[string]string
+// @Router /audit/logs [get]
+// @BasePath /api/v1
+func (h *Handler) ListLogs(w http.ResponseWriter, r *http.Request) {
+	// Only administrators may read audit logs.
+	user, ok := auth.UserFromContext(r.Context())
+	if !ok || user.Role != auth.RoleAdmin {
+		http.Error(w, "Unauthorized: Admin role required", http.StatusForbidden)
+		return
+	}
+
+	// Pagination: absent or invalid values fall back to page 1 / 10 items.
+	page, _ := strconv.Atoi(r.URL.Query().Get("page"))
+	if page < 1 {
+		page = 1
+	}
+	pageSize, _ := strconv.Atoi(r.URL.Query().Get("page_size"))
+	if pageSize < 1 {
+		pageSize = 10
+	}
+
+	// Optional RFC3339 time-range filters; nil means "no bound".
+	var start *time.Time
+	if startStr := r.URL.Query().Get("start"); startStr != "" {
+		startTime, err := time.Parse(time.RFC3339, startStr)
+		if err != nil {
+			http.Error(w, "Invalid start time format", http.StatusBadRequest)
+			return
+		}
+		start = &startTime
+	}
+
+	var end *time.Time
+	if endStr := r.URL.Query().Get("end"); endStr != "" {
+		endTime, err := time.Parse(time.RFC3339, endStr)
+		if err != nil {
+			http.Error(w, "Invalid end time format", http.StatusBadRequest)
+			return
+		}
+		end = &endTime
+	}
+
+	eventType := r.URL.Query().Get("event_type")
+
+	// user_id must be a base-10 int64; 0 means "no user filter".
+	userIDInt := int64(0)
+	if userID := r.URL.Query().Get("user_id"); userID != "" {
+		userInt, err := strconv.ParseInt(userID, 10, 64)
+		if err != nil {
+			http.Error(w, "Invalid user ID", http.StatusBadRequest)
+			return
+		}
+		userIDInt = userInt
+	}
+
+	// Pagination and filtering are handled by the service layer.
+	logs, err := h.service.ListLogs(r.Context(), page, pageSize, start, end, eventType, userIDInt)
+	if err != nil {
+		h.logger.Error("Failed to list audit logs", "error", err)
+		http.Error(w, "Failed to list audit logs", http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(logs)
+}
+
+// GetLog retrieves a specific audit log by ID
+// @Summary Get audit log
+// @Description Retrieves a specific audit log by ID
+// @Tags audit
+// @Produce json
+// @Param id path string true "Log ID"
+// @Success 200 {object} Event
+// @Failure 400 {object} map[string]string
+// @Failure 401 {object} map[string]string
+// @Failure 403 {object} map[string]string
+// @Failure 404 {object} map[string]string
+// @Failure 500 {object} map[string]string
+// @Router /audit/logs/{id} [get]
+// @BasePath /api/v1
+func (h *Handler) GetLog(w http.ResponseWriter, r *http.Request) {
+	// Admin-only endpoint.
+	user, ok := auth.UserFromContext(r.Context())
+	if !ok || user.Role != auth.RoleAdmin {
+		http.Error(w, "Unauthorized: Admin role required", http.StatusForbidden)
+		return
+	}
+
+	rawID := chi.URLParam(r, "id")
+	if rawID == "" {
+		http.Error(w, "Log ID is required", http.StatusBadRequest)
+		return
+	}
+	id, err := strconv.ParseInt(rawID, 10, 64)
+	if err != nil {
+		http.Error(w, "Invalid log ID", http.StatusBadRequest)
+		return
+	}
+
+	entry, err := h.service.GetLog(r.Context(), id)
+	if err != nil {
+		h.logger.Error("Failed to get audit log", "error", err)
+		http.Error(w, "Failed to get audit log", http.StatusInternalServerError)
+		return
+	}
+	if entry == nil {
+		// The service returns (nil, nil) when the row does not exist.
+		http.Error(w, "Log not found", http.StatusNotFound)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(entry)
+}
diff --git a/pkg/audit/middleware.go b/pkg/audit/middleware.go
new file mode 100644
index 0000000..b9ae323
--- /dev/null
+++ b/pkg/audit/middleware.go
@@ -0,0 +1,207 @@
+package audit
+
+import (
+ "net/http"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/auth"
+ httputil "github.com/chainlaunch/chainlaunch/pkg/http"
+ "github.com/google/uuid"
+)
+
+const (
+	// maxBodySize is the maximum size of request/response body to log (1MB)
+	maxBodySize = 1 * 1024 * 1024
+
+	// SessionCookieName is the name of the session cookie
+	// NOTE(review): the auth package reads its own session-cookie constant in
+	// GetSessionID — confirm the two names stay in sync.
+	SessionCookieName = "session_id"
+)
+
+// isStaticFile checks if the path is a static file
+// (matched case-insensitively by file extension).
+func isStaticFile(path string) bool {
+	staticExts := map[string]bool{
+		".html": true, ".htm": true, ".css": true, ".js": true, ".json": true,
+		".png": true, ".jpg": true, ".jpeg": true, ".gif": true, ".ico": true,
+		".svg": true, ".woff": true, ".woff2": true, ".ttf": true, ".eot": true,
+	}
+	return staticExts[strings.ToLower(filepath.Ext(path))]
+}
+
+// isAPIPath checks if the path is an API endpoint
+func isAPIPath(path string) bool {
+	return strings.HasPrefix(path, "/api/")
+}
+
+// isSecurityEvent checks if the request is a security-relevant event:
+// authentication endpoints, authorization changes, system configuration
+// changes, or any DELETE/PUT request.
+func isSecurityEvent(path string, method string) bool {
+	// All destructive/mutating verbs count as security-relevant.
+	if method == http.MethodDelete || method == http.MethodPut {
+		return true
+	}
+	// Path fragments covering auth, authorization, and configuration.
+	for _, fragment := range []string{"/auth", "/login", "/permissions", "/roles", "/config", "/settings"} {
+		if strings.Contains(path, fragment) {
+			return true
+		}
+	}
+	return false
+}
+
+// HTTPMiddleware creates a middleware that logs HTTP requests and responses
+// as audit events. Static files, non-API paths, the audit endpoints
+// themselves, and node log-streaming endpoints are excluded. Events are
+// enqueued via LogEventAsync, so persistence never blocks the request path.
+func HTTPMiddleware(service *AuditService) func(http.Handler) http.Handler {
+	return func(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			// Skip auditing for static files and non-API paths
+			if isStaticFile(r.URL.Path) || !isAPIPath(r.URL.Path) {
+				next.ServeHTTP(w, r)
+				return
+			}
+
+			// Skip auditing for audit log endpoints to prevent infinite loops
+			if strings.Contains(r.URL.Path, "/api/v1/audit") {
+				next.ServeHTTP(w, r)
+				return
+			}
+
+			// Skip auditing for log streaming endpoints
+			if strings.Contains(r.URL.Path, "/api/v1/nodes/") && strings.HasSuffix(r.URL.Path, "/logs") {
+				next.ServeHTTP(w, r)
+				return
+			}
+
+			// Generate a unique request ID and get session ID.
+			// A random session ID is synthesized when the request carries none,
+			// so every stored event has a non-empty session field.
+			requestID := uuid.New()
+			sessionID := auth.GetSessionID(r)
+			if sessionID == "" {
+				sessionID = uuid.New().String()
+			}
+
+			// Create a response writer that captures the status code and body
+			// (bodies are buffered up to maxBodySize per request).
+			rw := newResponseWriter(w)
+
+			// Start timing the request with UTC timestamp
+			start := time.Now().UTC()
+
+			// Process the request
+			next.ServeHTTP(rw, r)
+
+			// Calculate request duration
+			duration := time.Since(start)
+
+			// Create base event details
+			details := map[string]interface{}{
+				"method":            r.Method,
+				"path":              r.URL.Path,
+				"query":             r.URL.RawQuery,
+				"user_agent":        r.UserAgent(),
+				"duration":          duration.String(),
+				"status":            rw.statusCode,
+				"session_id":        sessionID,
+				"correlation_id":    r.Header.Get("X-Correlation-ID"),
+				"timestamp_utc":     start.Format(time.RFC3339Nano),
+				"client_ip":         r.RemoteAddr,
+				"forwarded_for":     r.Header.Get("X-Forwarded-For"),
+				"is_security_event": isSecurityEvent(r.URL.Path, r.Method),
+			}
+
+			// Get request body from resource context if available
+			if resource, ok := httputil.ResourceFromContext(r); ok {
+				if len(resource.Body) > 0 && len(resource.Body) <= maxBodySize {
+					details["request_body"] = string(resource.Body)
+				}
+			}
+
+			// Add response body for non-GET requests or error responses
+			if (r.Method != http.MethodGet || rw.statusCode >= 400) && len(rw.body) > 0 && len(rw.body) <= maxBodySize {
+				details["response_body"] = string(rw.body)
+			}
+
+			// Create audit event.
+			// NOTE: the event holds `details` by reference, so keys added below
+			// (resource_action, auth_method, failure_reason) are still captured.
+			event := NewEvent().WithDetails(details)
+
+			// Set event fields
+			event.EventSource = "http"
+			event.EventType = "http_request"
+			event.RequestID = requestID
+			event.SourceIP = r.RemoteAddr
+
+			// Set resource information if available
+			if resource, ok := httputil.ResourceFromContext(r); ok {
+				event.AffectedResource = resource.Type
+				if resource.ID != "" {
+					event.AffectedResource += ":" + resource.ID
+				}
+				// Add resource action to details
+				details["resource_action"] = resource.Action
+			}
+
+			// Set user identity and authentication method if available
+			if user, ok := auth.UserFromContext(r.Context()); ok {
+				event.UserIdentity = user.ID
+				details["auth_method"] = r.Header.Get("X-Auth-Method")
+				details["auth_provider"] = r.Header.Get("X-Auth-Provider")
+			}
+
+			// Set outcome based on status code (2xx/3xx = success)
+			if rw.statusCode >= 200 && rw.statusCode < 400 {
+				event.EventOutcome = EventOutcomeSuccess
+			} else {
+				event.EventOutcome = EventOutcomeFailure
+				// Add failure reason for security events
+				if isSecurityEvent(r.URL.Path, r.Method) {
+					details["failure_reason"] = http.StatusText(rw.statusCode)
+				}
+			}
+
+			// Set severity based on status code and event type:
+			// 5xx = critical, 4xx = warning, security events = info, else debug.
+			switch {
+			case rw.statusCode >= 500:
+				event.Severity = SeverityCritical
+			case rw.statusCode >= 400:
+				event.Severity = SeverityWarning
+			case isSecurityEvent(r.URL.Path, r.Method):
+				event.Severity = SeverityInfo
+			default:
+				event.Severity = SeverityDebug
+			}
+
+			// Log the event asynchronously
+			service.LogEventAsync(event)
+		})
+	}
+}
+
+// responseWriter is a wrapper around http.ResponseWriter that captures the
+// status code and buffers the response body (up to maxBodySize) for auditing.
+type responseWriter struct {
+	http.ResponseWriter
+	statusCode int    // last status written; defaults to 200
+	body       []byte // buffered copy of the response body
+}
+
+// newResponseWriter creates a new responseWriter
+func newResponseWriter(w http.ResponseWriter) *responseWriter {
+	return &responseWriter{w, http.StatusOK, nil}
+}
+
+// WriteHeader captures the status code before writing it
+func (rw *responseWriter) WriteHeader(code int) {
+	rw.statusCode = code
+	rw.ResponseWriter.WriteHeader(code)
+}
+
+// Write captures the response body before writing it
+func (rw *responseWriter) Write(b []byte) (int, error) {
+	// Only capture body if it's not too large
+	if len(rw.body)+len(b) <= maxBodySize {
+		rw.body = append(rw.body, b...)
+	}
+	return rw.ResponseWriter.Write(b)
+}
+
+// Flush forwards http.Flusher to the underlying writer. Without this, the
+// wrapper hides the Flusher interface from downstream handlers, silently
+// breaking streaming/SSE responses behind the audit middleware.
+func (rw *responseWriter) Flush() {
+	if f, ok := rw.ResponseWriter.(http.Flusher); ok {
+		f.Flush()
+	}
+}
diff --git a/pkg/audit/service.go b/pkg/audit/service.go
new file mode 100644
index 0000000..dccd85a
--- /dev/null
+++ b/pkg/audit/service.go
@@ -0,0 +1,245 @@
+package audit
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "sync"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ "github.com/google/uuid"
+)
+
+// AuditService implements the Service interface.
+// It persists events synchronously via LogEvent and asynchronously through a
+// fixed pool of worker goroutines fed by a buffered channel.
+type AuditService struct {
+	db       *db.Queries    // sqlc-generated query layer
+	queue    chan Event     // buffered queue for async logging (capacity 1000)
+	workers  int            // number of worker goroutines
+	wg       sync.WaitGroup // tracks running workers so Close can wait
+	stopChan chan struct{}  // closed by Close to stop workers
+}
+
+// NewService creates a new audit service; workers <= 0 falls back to 5.
+// NOTE(review): the queue capacity and worker default duplicate the values
+// in DefaultConfig, but Config is not consumed here — confirm intent.
+func NewService(db *db.Queries, workers int) *AuditService {
+	if workers <= 0 {
+		workers = 5 // Default number of workers
+	}
+
+	s := &AuditService{
+		db:       db,
+		queue:    make(chan Event, 1000), // Buffer size of 1000 events
+		workers:  workers,
+		stopChan: make(chan struct{}),
+	}
+
+	s.startWorkers()
+	return s
+}
+
+// startWorkers launches the async logging goroutines; each one registers
+// with wg so Close can wait for a clean shutdown.
+func (s *AuditService) startWorkers() {
+	for i := 0; i < s.workers; i++ {
+		s.wg.Add(1)
+		go s.worker()
+	}
+}
+
+// LogEvent implements the Service interface.
+// It serializes event.Details to JSON and inserts the event synchronously
+// using the sqlc-generated CreateAuditLog query.
+// NOTE(review): every nullable column is written with Valid: true even when
+// the source string is empty, so empty values are stored as "" rather than
+// NULL — confirm that is the intended schema semantics.
+func (s *AuditService) LogEvent(ctx context.Context, event Event) error {
+	details, err := json.Marshal(event.Details)
+	if err != nil {
+		return err
+	}
+
+	// Create audit log using generated query
+	_, err = s.db.CreateAuditLog(ctx, &db.CreateAuditLogParams{
+		Timestamp:        event.Timestamp,
+		EventSource:      event.EventSource,
+		UserIdentity:     event.UserIdentity,
+		SourceIp:         sql.NullString{String: event.SourceIP, Valid: true},
+		EventType:        event.EventType,
+		EventOutcome:     string(event.EventOutcome),
+		AffectedResource: sql.NullString{String: event.AffectedResource, Valid: true},
+		RequestID:        sql.NullString{String: event.RequestID.String(), Valid: true},
+		Severity:         sql.NullString{String: string(event.Severity), Valid: true},
+		Details:          sql.NullString{String: string(details), Valid: true},
+		SessionID:        sql.NullString{String: event.SessionID, Valid: true},
+	})
+
+	return err
+}
+
+// LogEventAsync implements the Service interface.
+// The event is enqueued without blocking; when the buffer is full the event
+// is silently dropped (no error is actually logged despite the comment).
+// NOTE(review): calling this after Close panics — Close closes the queue
+// channel, and sending on a closed channel panics even inside select.
+func (s *AuditService) LogEventAsync(event Event) {
+	select {
+	case s.queue <- event:
+		// Event queued successfully
+	default:
+		// Queue is full, log error but don't block
+	}
+}
+
+// worker processes events from the channel until stopChan closes.
+//
+// Events are persisted through LogEvent so the synchronous and asynchronous
+// paths store identical records — the previous inline insert here omitted
+// the SessionID column that LogEvent writes. Persistence errors are
+// swallowed on purpose: audit logging must never take down the service.
+func (s *AuditService) worker() {
+	defer s.wg.Done()
+
+	for {
+		select {
+		case event := <-s.queue:
+			// Reuse the synchronous path. context.Background() is used because
+			// the originating request context may already be cancelled.
+			if err := s.LogEvent(context.Background(), event); err != nil {
+				// Log error but continue processing
+				continue
+			}
+
+		case <-s.stopChan:
+			return
+		}
+	}
+}
+
+// Close stops the service and waits for all workers to finish.
+// NOTE(review): events still buffered in the queue when stopChan closes may
+// be dropped — each worker's select chooses randomly between the queue and
+// the stop signal, so the queue is not drained before exit.
+// NOTE(review): any LogEventAsync call after Close panics, because the queue
+// channel is closed here.
+func (s *AuditService) Close() {
+	close(s.stopChan)
+	s.wg.Wait()
+	close(s.queue)
+}
+
+// ListLogs implements the Service interface.
+// It returns one page of audit logs plus the total match count. start, end,
+// eventType, and userID are optional filters (nil / "" / 0 disables each);
+// page is 1-based.
+func (s *AuditService) ListLogs(ctx context.Context, page, pageSize int, start, end *time.Time, eventType string, userID int64) (*ListLogsResponse, error) {
+	offset := (page - 1) * pageSize
+
+	// Convert time pointers to sql.NullTime
+	var startTime, endTime sql.NullTime
+	if start != nil {
+		startTime.Time = *start
+		startTime.Valid = true
+	}
+	if end != nil {
+		endTime.Time = *end
+		endTime.Valid = true
+	}
+
+	// Get logs using generated query.
+	// NOTE(review): Column1/Column3/Column5/Column7 are sqlc-generated names
+	// for the "is this filter enabled" arguments — confirm against the SQL
+	// query definitions that the pairing with Timestamp/EventType/UserIdentity
+	// is correct.
+	logs, err := s.db.ListAuditLogs(ctx, &db.ListAuditLogsParams{
+		Column1:      start,
+		Timestamp:    startTime.Time,
+		Column3:      end,
+		Timestamp_2:  endTime.Time,
+		Column5:      eventType,
+		EventType:    eventType,
+		Column7:      userID,
+		UserIdentity: userID,
+		Limit:        int64(pageSize),
+		Offset:       int64(offset),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Get total count using generated query
+	total, err := s.db.CountAuditLogs(ctx, &db.CountAuditLogsParams{
+		Column1:      start,
+		Timestamp:    startTime.Time,
+		Column3:      end,
+		Timestamp_2:  endTime.Time,
+		Column5:      eventType,
+		EventType:    eventType,
+		Column7:      userID,
+		UserIdentity: userID,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert database models to response models.
+	// NOTE(review): a NULL/empty Details column makes json.Unmarshal fail and
+	// aborts the whole page — confirm details is always populated on insert.
+	events := make([]Event, len(logs))
+	for i, log := range logs {
+		var details map[string]interface{}
+		if err := json.Unmarshal([]byte(log.Details.String), &details); err != nil {
+			return nil, err
+		}
+
+		requestID, err := uuid.Parse(log.RequestID.String)
+		if err != nil {
+			return nil, err
+		}
+
+		// NOTE(review): SessionID is not copied here, although GetLog copies
+		// it for single rows — likely an omission; confirm the list row type
+		// exposes the session_id column.
+		events[i] = Event{
+			ID:               log.ID,
+			Timestamp:        log.Timestamp,
+			EventSource:      log.EventSource,
+			UserIdentity:     log.UserIdentity,
+			SourceIP:         log.SourceIp.String,
+			EventType:        log.EventType,
+			EventOutcome:     EventOutcome(log.EventOutcome),
+			AffectedResource: log.AffectedResource.String,
+			RequestID:        requestID,
+			Severity:         Severity(log.Severity.String),
+			Details:          details,
+		}
+	}
+
+	return &ListLogsResponse{
+		Items:      events,
+		TotalCount: int(total),
+		Page:       page,
+		PageSize:   pageSize,
+	}, nil
+}
+
+// GetLog implements the Service interface.
+// It returns the audit event with the given ID, (nil, nil) when no such row
+// exists (callers translate that into a 404), or an error on decode/query
+// failure.
+func (s *AuditService) GetLog(ctx context.Context, id int64) (*Event, error) {
+	log, err := s.db.GetAuditLog(ctx, id)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			// Missing row is not an error at this layer.
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	var details map[string]interface{}
+	if err := json.Unmarshal([]byte(log.Details.String), &details); err != nil {
+		return nil, err
+	}
+
+	requestID, err := uuid.Parse(log.RequestID.String)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Event{
+		ID:               log.ID,
+		Timestamp:        log.Timestamp,
+		EventSource:      log.EventSource,
+		UserIdentity:     log.UserIdentity,
+		SourceIP:         log.SourceIp.String,
+		EventType:        log.EventType,
+		EventOutcome:     EventOutcome(log.EventOutcome),
+		AffectedResource: log.AffectedResource.String,
+		RequestID:        requestID,
+		Severity:         Severity(log.Severity.String),
+		Details:          details,
+		SessionID:        log.SessionID.String,
+	}, nil
+}
diff --git a/pkg/auth/middleware.go b/pkg/auth/middleware.go
index 2100c13..60b041b 100644
--- a/pkg/auth/middleware.go
+++ b/pkg/auth/middleware.go
@@ -57,6 +57,41 @@ func parseBasicAuth(r *http.Request) (username, password string, ok bool) {
return pair[0], pair[1], true
}
+// GetSessionID extracts and validates the session ID from the request
+// Returns the session ID if valid, empty string otherwise
+//
+// Lookup order:
+//  1. Authorization: "Bearer <token>" — the token is returned as-is.
+//     NOTE(review): bearer tokens are NOT signature-checked here, unlike the
+//     cookie path below — confirm they are validated elsewhere before use.
+//  2. The session cookie, whose value must be "<id>.<signature>" with a
+//     signature accepted by verifySessionID.
+func GetSessionID(r *http.Request) string {
+	// Try Bearer token auth first
+	authHeader := r.Header.Get("Authorization")
+	if authHeader != "" {
+		parts := strings.Split(authHeader, " ")
+		if len(parts) == 2 && parts[0] == "Bearer" {
+			return parts[1]
+		}
+	}
+
+	// Try cookie auth
+	cookie, err := r.Cookie(SessionCookieName)
+	if err != nil {
+		return ""
+	}
+
+	// Split cookie value into session ID and signature
+	parts := strings.Split(cookie.Value, ".")
+	if len(parts) != 2 {
+		return ""
+	}
+
+	sessionID := parts[0]
+	signature := parts[1]
+
+	// Verify signature
+	if !verifySessionID(sessionID, signature) {
+		return ""
+	}
+
+	return sessionID
+}
+
// AuthMiddleware validates the session token and adds the user to the context
func AuthMiddleware(authService *AuthService) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
diff --git a/pkg/auth/service.go b/pkg/auth/service.go
index 5b07e0b..d449022 100644
--- a/pkg/auth/service.go
+++ b/pkg/auth/service.go
@@ -228,6 +228,7 @@ func (s *AuthService) GetUserByUsername(username string) (*User, error) {
Role: Role(dbUser.Role.String),
CreatedAt: dbUser.CreatedAt,
LastLoginAt: dbUser.LastLoginAt.Time,
+ Password: dbUser.Password,
}, nil
}
diff --git a/pkg/common/addresses/addresses.go b/pkg/common/addresses/addresses.go
new file mode 100644
index 0000000..6ee9dd0
--- /dev/null
+++ b/pkg/common/addresses/addresses.go
@@ -0,0 +1,59 @@
+package addresses
+
+import (
+ "fmt"
+ "net"
+ "os"
+)
+
+// GetExternalIP returns the external IP address of the node
+func GetExternalIP() (string, error) {
+ // Try to get external IP from environment variable first
+ if externalIP := os.Getenv("EXTERNAL_IP"); externalIP != "" {
+ return externalIP, nil
+ }
+
+ // Get local network interfaces
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return "", fmt.Errorf("failed to get network interfaces: %w", err)
+ }
+
+ // Look for a suitable non-loopback interface with an IPv4 address
+ for _, iface := range interfaces {
+ // Skip loopback, down interfaces, and interfaces without addresses
+ if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 {
+ continue
+ }
+
+ addrs, err := iface.Addrs()
+ if err != nil {
+ continue
+ }
+
+ for _, addr := range addrs {
+ // Check if this is an IP network address
+ ipNet, ok := addr.(*net.IPNet)
+ if !ok {
+ continue
+ }
+
+ // Skip loopback and IPv6 addresses
+ ip := ipNet.IP.To4()
+ if ip == nil || ip.IsLoopback() {
+ continue
+ }
+
+ // Skip link-local addresses
+ if ip[0] == 169 && ip[1] == 254 {
+ continue
+ }
+
+ // Found a suitable IP address
+ return ip.String(), nil
+ }
+ }
+
+ // Fallback to localhost if no suitable interface is found
+ return "127.0.0.1", nil
+}
diff --git a/pkg/common/ports/ports.go b/pkg/common/ports/ports.go
new file mode 100644
index 0000000..0cc5c30
--- /dev/null
+++ b/pkg/common/ports/ports.go
@@ -0,0 +1,180 @@
+package ports
+
+import (
+ "fmt"
+ "net"
+ "sync"
+)
+
+var (
+ // Default port ranges for different node types
+ DefaultPortRanges = map[string]PortRange{
+ "fabric-peer": {
+ Start: 7051,
+ End: 7151,
+ },
+ "fabric-orderer": {
+ Start: 7050,
+ End: 7150,
+ },
+ "fabric-ca": {
+ Start: 7054,
+ End: 7154,
+ },
+ "besu": {
+ Start: 8545,
+ End: 8645,
+ },
+ "besu-p2p": {
+ Start: 30303,
+ End: 30403,
+ },
+ "besu-metrics": {
+ Start: 9545,
+ End: 9645,
+ },
+ }
+
+ // Mutex to protect port allocation
+ portMutex sync.Mutex
+ // Map to track allocated ports
+ allocatedPorts = make(map[int]string)
+)
+
+// PortRange represents a range of ports that can be allocated
+type PortRange struct {
+ Start int
+ End int
+}
+
+// PortAllocation represents an allocated port with its type
+type PortAllocation struct {
+ Port int
+ NodeType string
+}
+
+// GetFreePort finds a free port in the specified range
+func GetFreePort(nodeType string) (*PortAllocation, error) {
+ portMutex.Lock()
+ defer portMutex.Unlock()
+
+ // Get port range for node type
+ portRange, exists := DefaultPortRanges[nodeType]
+ if !exists {
+ return nil, fmt.Errorf("unknown node type: %s", nodeType)
+ }
+
+ // Try to find a free port in the range
+ for port := portRange.Start; port <= portRange.End; port++ {
+ if _, allocated := allocatedPorts[port]; !allocated {
+ // Check if port is actually free
+ addr := fmt.Sprintf(":%d", port)
+ listener, err := net.Listen("tcp", addr)
+ if err == nil {
+ listener.Close()
+ allocatedPorts[port] = nodeType
+ return &PortAllocation{
+ Port: port,
+ NodeType: nodeType,
+ }, nil
+ }
+ }
+ }
+
+ return nil, fmt.Errorf("no free ports available in range %d-%d for node type %s",
+ portRange.Start, portRange.End, nodeType)
+}
+
+// ReleasePort releases an allocated port
+func ReleasePort(port int) error {
+ portMutex.Lock()
+ defer portMutex.Unlock()
+
+ if _, exists := allocatedPorts[port]; !exists {
+ return fmt.Errorf("port %d is not allocated", port)
+ }
+
+ delete(allocatedPorts, port)
+ return nil
+}
+
+// GetFreePorts allocates multiple free ports for a node
+func GetFreePorts(nodeType string, count int) ([]*PortAllocation, error) {
+	allocations := make([]*PortAllocation, 0, count)
+
+	for i := 0; i < count; i++ {
+		allocation, err := GetFreePort(nodeType)
+		if err != nil {
+			// Release any previously allocated ports
+			for _, alloc := range allocations {
+				ReleasePort(alloc.Port)
+			}
+			return nil, fmt.Errorf("failed to allocate port %d of %d: %w", i+1, count, err)
+		}
+		allocations = append(allocations, allocation)
+	}
+
+	return allocations, nil
+}
+
+// IsPortAvailable checks if a specific port is available
+func IsPortAvailable(port int) bool {
+ portMutex.Lock()
+ defer portMutex.Unlock()
+
+ if _, allocated := allocatedPorts[port]; allocated {
+ return false
+ }
+
+ addrs := []string{
+ "0.0.0.0",
+ "127.0.0.1",
+ }
+ for _, addr := range addrs {
+ fullAddr := fmt.Sprintf("%s:%d", addr, port)
+ listener, err := net.Listen("tcp", fullAddr)
+ if err != nil {
+ return false
+ }
+ listener.Close()
+ }
+ return true
+}
+
+// GetPortRange returns the port range for a specific node type
+func GetPortRange(nodeType string) (*PortRange, error) {
+ portRange, exists := DefaultPortRanges[nodeType]
+ if !exists {
+ return nil, fmt.Errorf("unknown node type: %s", nodeType)
+ }
+ return &portRange, nil
+}
+
+// AddPortRange adds a new port range for a node type
+func AddPortRange(nodeType string, start, end int) error {
+ portMutex.Lock()
+ defer portMutex.Unlock()
+
+ if start >= end {
+ return fmt.Errorf("invalid port range: start (%d) must be less than end (%d)", start, end)
+ }
+
+ DefaultPortRanges[nodeType] = PortRange{
+ Start: start,
+ End: end,
+ }
+ return nil
+}
+
+// GetAllocatedPorts returns a map of all currently allocated ports
+func GetAllocatedPorts() map[int]string {
+ portMutex.Lock()
+ defer portMutex.Unlock()
+
+ // Create a copy of the map to prevent external modification
+ ports := make(map[int]string, len(allocatedPorts))
+ for port, nodeType := range allocatedPorts {
+ ports[port] = nodeType
+ }
+ return ports
+}
diff --git a/pkg/common/ports/ports_test.go b/pkg/common/ports/ports_test.go
new file mode 100644
index 0000000..a353861
--- /dev/null
+++ b/pkg/common/ports/ports_test.go
@@ -0,0 +1,186 @@
+package ports
+
+import (
+ "net"
+ "strconv"
+ "testing"
+)
+
+func TestGetFreePort(t *testing.T) {
+ // Test getting a free port for a known node type
+ allocation, err := GetFreePort("fabric-peer")
+ if err != nil {
+ t.Fatalf("Failed to get free port: %v", err)
+ }
+
+ if allocation.NodeType != "fabric-peer" {
+ t.Errorf("Expected node type 'fabric-peer', got '%s'", allocation.NodeType)
+ }
+
+ if allocation.Port < 7051 || allocation.Port > 7151 {
+ t.Errorf("Port %d outside expected range 7051-7151", allocation.Port)
+ }
+
+ // Test that the port is actually free
+ addr := net.JoinHostPort("", strconv.Itoa(allocation.Port))
+ listener, err := net.Listen("tcp", addr)
+ if err != nil {
+ t.Errorf("Port %d is not actually free: %v", allocation.Port, err)
+ }
+ listener.Close()
+
+ // Test getting a port for unknown node type
+ _, err = GetFreePort("unknown-type")
+ if err == nil {
+ t.Error("Expected error for unknown node type, got nil")
+ }
+}
+
+func TestReleasePort(t *testing.T) {
+ // Get a port first
+ allocation, err := GetFreePort("fabric-peer")
+ if err != nil {
+ t.Fatalf("Failed to get free port: %v", err)
+ }
+
+ // Test releasing the port
+ err = ReleasePort(allocation.Port)
+ if err != nil {
+ t.Errorf("Failed to release port: %v", err)
+ }
+
+ // Test releasing a non-allocated port
+ err = ReleasePort(9999)
+ if err == nil {
+ t.Error("Expected error when releasing non-allocated port, got nil")
+ }
+}
+
+func TestGetFreePorts(t *testing.T) {
+ // Test getting multiple ports
+ allocations, err := GetFreePorts("fabric-peer", 3)
+ if err != nil {
+ t.Fatalf("Failed to get free ports: %v", err)
+ }
+
+ if len(allocations) != 3 {
+ t.Errorf("Expected 3 ports, got %d", len(allocations))
+ }
+
+ // Verify all ports are unique
+ ports := make(map[int]bool)
+ for _, alloc := range allocations {
+ if ports[alloc.Port] {
+ t.Errorf("Duplicate port %d allocated", alloc.Port)
+ }
+ ports[alloc.Port] = true
+ }
+
+ // Clean up
+ for _, alloc := range allocations {
+ ReleasePort(alloc.Port)
+ }
+}
+
+func TestIsPortAvailable(t *testing.T) {
+ // Find a free port in the range
+ portRange, err := GetPortRange("fabric-peer")
+ if err != nil {
+ t.Fatalf("Failed to get port range: %v", err)
+ }
+
+ var freePort int
+ for p := portRange.Start; p <= portRange.End; p++ {
+ if IsPortAvailable(p) {
+ freePort = p
+ break
+ }
+ }
+ if freePort == 0 {
+ t.Fatal("No free port found in range")
+ }
+
+ // Test with a free port
+ if !IsPortAvailable(freePort) {
+ t.Errorf("Port %d should be available", freePort)
+ }
+
+ // Allocate a port
+ allocation, err := GetFreePort("fabric-peer")
+ if err != nil {
+ t.Fatalf("Failed to get free port: %v", err)
+ }
+
+ // Test with an allocated port
+ if IsPortAvailable(allocation.Port) {
+ t.Errorf("Port %d should not be available", allocation.Port)
+ }
+
+ // Clean up
+ ReleasePort(allocation.Port)
+}
+
+func TestGetPortRange(t *testing.T) {
+ // Test getting range for known node type
+ portRange, err := GetPortRange("fabric-peer")
+ if err != nil {
+ t.Fatalf("Failed to get port range: %v", err)
+ }
+
+ if portRange.Start != 7051 || portRange.End != 7151 {
+ t.Errorf("Expected range 7051-7151, got %d-%d", portRange.Start, portRange.End)
+ }
+
+ // Test getting range for unknown node type
+ _, err = GetPortRange("unknown-type")
+ if err == nil {
+ t.Error("Expected error for unknown node type, got nil")
+ }
+}
+
+func TestAddPortRange(t *testing.T) {
+ // Test adding valid port range
+ err := AddPortRange("test-type", 8000, 8100)
+ if err != nil {
+ t.Errorf("Failed to add port range: %v", err)
+ }
+
+ // Verify the range was added
+ portRange, err := GetPortRange("test-type")
+ if err != nil {
+ t.Fatalf("Failed to get port range: %v", err)
+ }
+
+ if portRange.Start != 8000 || portRange.End != 8100 {
+ t.Errorf("Expected range 8000-8100, got %d-%d", portRange.Start, portRange.End)
+ }
+
+ // Test adding invalid port range
+ err = AddPortRange("invalid-range", 9000, 8000)
+ if err == nil {
+ t.Error("Expected error for invalid port range, got nil")
+ }
+}
+
+func TestGetAllocatedPorts(t *testing.T) {
+ // Get some ports first
+ allocations, err := GetFreePorts("fabric-peer", 2)
+ if err != nil {
+ t.Fatalf("Failed to get free ports: %v", err)
+ }
+
+ // Get all allocated ports
+ allocatedPorts := GetAllocatedPorts()
+
+ // Verify our ports are in the map
+ for _, alloc := range allocations {
+ if nodeType, exists := allocatedPorts[alloc.Port]; !exists || nodeType != "fabric-peer" {
+ t.Errorf("Port %d not found in allocated ports or wrong node type", alloc.Port)
+ }
+ }
+
+ // Clean up
+ for _, alloc := range allocations {
+ ReleasePort(alloc.Port)
+ }
+}
diff --git a/pkg/db/migrations/0007_add_prometheus_config.down.sql b/pkg/db/migrations/0007_add_prometheus_config.down.sql
new file mode 100644
index 0000000..9185a2c
--- /dev/null
+++ b/pkg/db/migrations/0007_add_prometheus_config.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS prometheus_config;
\ No newline at end of file
diff --git a/pkg/db/migrations/0007_add_prometheus_config.up.sql b/pkg/db/migrations/0007_add_prometheus_config.up.sql
new file mode 100644
index 0000000..e57bccc
--- /dev/null
+++ b/pkg/db/migrations/0007_add_prometheus_config.up.sql
@@ -0,0 +1,43 @@
+CREATE TABLE prometheus_config (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ prometheus_port INTEGER NOT NULL,
+ data_dir TEXT NOT NULL,
+ config_dir TEXT NOT NULL,
+ container_name TEXT NOT NULL,
+ scrape_interval INTEGER NOT NULL,
+ evaluation_interval INTEGER NOT NULL,
+ deployment_mode TEXT NOT NULL DEFAULT 'docker',
+ docker_image TEXT NOT NULL DEFAULT 'prom/prometheus:latest',
+ docker_network TEXT,
+ docker_restart_policy TEXT NOT NULL DEFAULT 'unless-stopped',
+ docker_extra_args TEXT,
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Insert default configuration
+INSERT INTO prometheus_config (
+ prometheus_port,
+ data_dir,
+ config_dir,
+ container_name,
+ scrape_interval,
+ evaluation_interval,
+ deployment_mode,
+ docker_image,
+ docker_network,
+ docker_restart_policy,
+ docker_extra_args
+) VALUES (
+ 9090,
+ '/var/lib/prometheus',
+ '/etc/prometheus',
+ 'chainlaunch-prometheus',
+ 15,
+ 15,
+ 'docker',
+ 'prom/prometheus:latest',
+ 'chainlaunch-network',
+ 'unless-stopped',
+ '--web.enable-lifecycle --web.enable-admin-api'
+);
diff --git a/pkg/db/migrations/0008_create_audit_logs.down.sql b/pkg/db/migrations/0008_create_audit_logs.down.sql
new file mode 100644
index 0000000..e9d8bf4
--- /dev/null
+++ b/pkg/db/migrations/0008_create_audit_logs.down.sql
@@ -0,0 +1,2 @@
+DROP TRIGGER IF EXISTS update_audit_logs_updated_at;
+DROP TABLE IF EXISTS audit_logs;
\ No newline at end of file
diff --git a/pkg/db/migrations/0008_create_audit_logs.up.sql b/pkg/db/migrations/0008_create_audit_logs.up.sql
new file mode 100644
index 0000000..a8c0c91
--- /dev/null
+++ b/pkg/db/migrations/0008_create_audit_logs.up.sql
@@ -0,0 +1,29 @@
+CREATE TABLE IF NOT EXISTS audit_logs (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ timestamp DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ event_source TEXT NOT NULL,
+ user_identity INTEGER NOT NULL,
+ source_ip TEXT,
+ event_type TEXT NOT NULL,
+ event_outcome TEXT NOT NULL,
+ affected_resource TEXT,
+ request_id TEXT,
+ severity TEXT,
+ details TEXT,
+ created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Create indexes for common query patterns
+CREATE INDEX IF NOT EXISTS idx_audit_logs_timestamp ON audit_logs(timestamp);
+CREATE INDEX IF NOT EXISTS idx_audit_logs_event_type ON audit_logs(event_type);
+CREATE INDEX IF NOT EXISTS idx_audit_logs_user_identity ON audit_logs(user_identity);
+CREATE INDEX IF NOT EXISTS idx_audit_logs_request_id ON audit_logs(request_id);
+
+-- Create trigger for updated_at
+CREATE TRIGGER update_audit_logs_updated_at
+AFTER UPDATE ON audit_logs
+BEGIN
+ UPDATE audit_logs SET updated_at = CURRENT_TIMESTAMP
+ WHERE id = NEW.id;
+END;
\ No newline at end of file
diff --git a/pkg/db/migrations/0009_add_session_id.down.sql b/pkg/db/migrations/0009_add_session_id.down.sql
new file mode 100644
index 0000000..95f533b
--- /dev/null
+++ b/pkg/db/migrations/0009_add_session_id.down.sql
@@ -0,0 +1,5 @@
+-- Remove session_id index
+DROP INDEX IF EXISTS idx_audit_logs_session_id;
+
+-- Remove session_id column
+ALTER TABLE audit_logs DROP COLUMN session_id;
\ No newline at end of file
diff --git a/pkg/db/migrations/0009_add_session_id.up.sql b/pkg/db/migrations/0009_add_session_id.up.sql
new file mode 100644
index 0000000..44df649
--- /dev/null
+++ b/pkg/db/migrations/0009_add_session_id.up.sql
@@ -0,0 +1,5 @@
+-- Add session_id column to audit_logs table
+ALTER TABLE audit_logs ADD COLUMN session_id TEXT;
+
+-- Create index for session_id to improve query performance
+CREATE INDEX IF NOT EXISTS idx_audit_logs_session_id ON audit_logs(session_id);
\ No newline at end of file
diff --git a/pkg/db/models.go b/pkg/db/models.go
index 3aab915..4813936 100644
--- a/pkg/db/models.go
+++ b/pkg/db/models.go
@@ -9,6 +9,23 @@ import (
"time"
)
+type AuditLog struct {
+ ID int64 `json:"id"`
+ Timestamp time.Time `json:"timestamp"`
+ EventSource string `json:"eventSource"`
+ UserIdentity int64 `json:"userIdentity"`
+ SourceIp sql.NullString `json:"sourceIp"`
+ EventType string `json:"eventType"`
+ EventOutcome string `json:"eventOutcome"`
+ AffectedResource sql.NullString `json:"affectedResource"`
+ RequestID sql.NullString `json:"requestId"`
+ Severity sql.NullString `json:"severity"`
+ Details sql.NullString `json:"details"`
+ CreatedAt time.Time `json:"createdAt"`
+ UpdatedAt time.Time `json:"updatedAt"`
+ SessionID sql.NullString `json:"sessionId"`
+}
+
type Backup struct {
ID int64 `json:"id"`
ScheduleID sql.NullInt64 `json:"scheduleId"`
@@ -237,6 +254,23 @@ type Plugin struct {
DeploymentStatus sql.NullString `json:"deploymentStatus"`
}
+type PrometheusConfig struct {
+ ID int64 `json:"id"`
+ PrometheusPort int64 `json:"prometheusPort"`
+ DataDir string `json:"dataDir"`
+ ConfigDir string `json:"configDir"`
+ ContainerName string `json:"containerName"`
+ ScrapeInterval int64 `json:"scrapeInterval"`
+ EvaluationInterval int64 `json:"evaluationInterval"`
+ DeploymentMode string `json:"deploymentMode"`
+ DockerImage string `json:"dockerImage"`
+ DockerNetwork sql.NullString `json:"dockerNetwork"`
+ DockerRestartPolicy string `json:"dockerRestartPolicy"`
+ DockerExtraArgs sql.NullString `json:"dockerExtraArgs"`
+ CreatedAt time.Time `json:"createdAt"`
+ UpdatedAt time.Time `json:"updatedAt"`
+}
+
type Session struct {
ID int64 `json:"id"`
SessionID string `json:"sessionId"`
diff --git a/pkg/db/querier.go b/pkg/db/querier.go
index cb081bf..7b0a459 100644
--- a/pkg/db/querier.go
+++ b/pkg/db/querier.go
@@ -12,6 +12,7 @@ import (
type Querier interface {
AddRevokedCertificate(ctx context.Context, arg *AddRevokedCertificateParams) error
CheckNetworkNodeExists(ctx context.Context, arg *CheckNetworkNodeExistsParams) (int64, error)
+ CountAuditLogs(ctx context.Context, arg *CountAuditLogsParams) (int64, error)
CountBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) (int64, error)
CountBackupsByTarget(ctx context.Context, targetID int64) (int64, error)
CountNetworks(ctx context.Context) (int64, error)
@@ -19,6 +20,7 @@ type Querier interface {
CountNodes(ctx context.Context) (int64, error)
CountNodesByPlatform(ctx context.Context, platform string) (int64, error)
CountUsers(ctx context.Context) (int64, error)
+ CreateAuditLog(ctx context.Context, arg *CreateAuditLogParams) (*AuditLog, error)
CreateBackup(ctx context.Context, arg *CreateBackupParams) (*Backup, error)
CreateBackupSchedule(ctx context.Context, arg *CreateBackupScheduleParams) (*BackupSchedule, error)
CreateBackupTarget(ctx context.Context, arg *CreateBackupTargetParams) (*BackupTarget, error)
@@ -60,6 +62,7 @@ type Querier interface {
EnableBackupSchedule(ctx context.Context, id int64) (*BackupSchedule, error)
GetAllKeys(ctx context.Context, arg *GetAllKeysParams) ([]*GetAllKeysRow, error)
GetAllNodes(ctx context.Context) ([]*Node, error)
+ GetAuditLog(ctx context.Context, id int64) (*AuditLog, error)
GetBackup(ctx context.Context, id int64) (*Backup, error)
GetBackupSchedule(ctx context.Context, id int64) (*BackupSchedule, error)
GetBackupTarget(ctx context.Context, id int64) (*BackupTarget, error)
@@ -100,6 +103,7 @@ type Querier interface {
GetOrganizationCRLInfo(ctx context.Context, id int64) (*GetOrganizationCRLInfoRow, error)
GetPeerPorts(ctx context.Context) ([]*GetPeerPortsRow, error)
GetPlugin(ctx context.Context, name string) (*Plugin, error)
+ GetPrometheusConfig(ctx context.Context) (*PrometheusConfig, error)
GetProvidersByNotificationType(ctx context.Context, arg *GetProvidersByNotificationTypeParams) ([]*NotificationProvider, error)
GetRecentCompletedBackups(ctx context.Context) ([]*Backup, error)
GetRevokedCertificate(ctx context.Context, arg *GetRevokedCertificateParams) (*FabricRevokedCertificate, error)
@@ -111,18 +115,20 @@ type Querier interface {
GetSetting(ctx context.Context, id int64) (*Setting, error)
GetUser(ctx context.Context, id int64) (*User, error)
GetUserByUsername(ctx context.Context, username string) (*User, error)
+ ListAuditLogs(ctx context.Context, arg *ListAuditLogsParams) ([]*AuditLog, error)
ListBackupSchedules(ctx context.Context) ([]*BackupSchedule, error)
ListBackupTargets(ctx context.Context) ([]*BackupTarget, error)
ListBackups(ctx context.Context, arg *ListBackupsParams) ([]*Backup, error)
ListBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) ([]*Backup, error)
ListBackupsByTarget(ctx context.Context, targetID int64) ([]*Backup, error)
ListFabricOrganizations(ctx context.Context) ([]*FabricOrganization, error)
- ListFabricOrganizationsWithKeys(ctx context.Context) ([]*ListFabricOrganizationsWithKeysRow, error)
+ ListFabricOrganizationsWithKeys(ctx context.Context, arg *ListFabricOrganizationsWithKeysParams) ([]*ListFabricOrganizationsWithKeysRow, error)
ListKeyProviders(ctx context.Context) ([]*KeyProvider, error)
ListKeys(ctx context.Context, arg *ListKeysParams) ([]*ListKeysRow, error)
ListNetworkNodesByNetwork(ctx context.Context, networkID int64) ([]*NetworkNode, error)
ListNetworkNodesByNode(ctx context.Context, nodeID int64) ([]*NetworkNode, error)
ListNetworks(ctx context.Context) ([]*Network, error)
+ ListNetworksByPlatform(ctx context.Context, platform string) ([]*Network, error)
ListNodeEvents(ctx context.Context, arg *ListNodeEventsParams) ([]*NodeEvent, error)
ListNodeEventsByType(ctx context.Context, arg *ListNodeEventsByTypeParams) ([]*NodeEvent, error)
ListNodes(ctx context.Context, arg *ListNodesParams) ([]*Node, error)
@@ -133,6 +139,7 @@ type Querier interface {
ListSettings(ctx context.Context) ([]*Setting, error)
ListUsers(ctx context.Context) ([]*User, error)
MarkBackupNotified(ctx context.Context, id int64) error
+ ResetPrometheusConfig(ctx context.Context) (*PrometheusConfig, error)
UnsetDefaultNotificationProvider(ctx context.Context, type_ string) error
UnsetDefaultProvider(ctx context.Context) error
UpdateBackupCompleted(ctx context.Context, arg *UpdateBackupCompletedParams) (*Backup, error)
@@ -162,6 +169,7 @@ type Querier interface {
UpdateNotificationProvider(ctx context.Context, arg *UpdateNotificationProviderParams) (*NotificationProvider, error)
UpdateOrganizationCRL(ctx context.Context, arg *UpdateOrganizationCRLParams) error
UpdatePlugin(ctx context.Context, arg *UpdatePluginParams) (*Plugin, error)
+ UpdatePrometheusConfig(ctx context.Context, arg *UpdatePrometheusConfigParams) (*PrometheusConfig, error)
UpdateProviderTestResults(ctx context.Context, arg *UpdateProviderTestResultsParams) (*NotificationProvider, error)
UpdateSetting(ctx context.Context, arg *UpdateSettingParams) (*Setting, error)
UpdateUser(ctx context.Context, arg *UpdateUserParams) (*User, error)
diff --git a/pkg/db/queries.sql b/pkg/db/queries.sql
index cc56565..20b6da0 100644
--- a/pkg/db/queries.sql
+++ b/pkg/db/queries.sql
@@ -10,6 +10,13 @@ WHERE id = ? LIMIT 1;
SELECT * FROM networks
ORDER BY created_at DESC;
+
+-- name: ListNetworksByPlatform :many
+SELECT * FROM networks
+WHERE
+ (CASE WHEN COALESCE(CAST(@platform AS TEXT), '') = '' THEN 1 ELSE platform = @platform END)
+ORDER BY created_at DESC;
+
-- name: CreateNetwork :one
INSERT INTO networks (
name, platform, status, description, config,
@@ -257,7 +264,8 @@ FROM fabric_organizations fo
LEFT JOIN keys sk ON fo.sign_key_id = sk.id
LEFT JOIN keys tk ON fo.tls_root_key_id = tk.id
LEFT JOIN key_providers p ON fo.provider_id = p.id
-ORDER BY fo.created_at DESC;
+ORDER BY fo.created_at DESC
+LIMIT ? OFFSET ?;
-- name: UpdateNetworkGenesisBlock :one
@@ -400,7 +408,7 @@ WHERE id = ? LIMIT 1;
-- name: ListNodeEvents :many
SELECT * FROM node_events
WHERE node_id = ?
-ORDER BY created_at DESC
+ORDER BY id DESC
LIMIT ? OFFSET ?;
-- name: CountNodeEvents :one
@@ -1021,4 +1029,79 @@ WHERE session_id = ?;
-- name: GetSessionByToken :one
SELECT * FROM sessions
-WHERE token = ?;
\ No newline at end of file
+WHERE token = ?;
+
+-- name: GetPrometheusConfig :one
+SELECT * FROM prometheus_config WHERE id = 1;
+
+-- name: UpdatePrometheusConfig :one
+UPDATE prometheus_config
+SET prometheus_port = ?,
+ data_dir = ?,
+ config_dir = ?,
+ container_name = ?,
+ scrape_interval = ?,
+ evaluation_interval = ?,
+ deployment_mode = ?,
+ docker_image = ?,
+ docker_network = ?,
+ docker_restart_policy = ?,
+ docker_extra_args = ?,
+ updated_at = CURRENT_TIMESTAMP
+WHERE id = 1
+RETURNING *;
+
+-- name: ResetPrometheusConfig :one
+UPDATE prometheus_config
+SET prometheus_port = 9090,
+ data_dir = '/var/lib/prometheus',
+ config_dir = '/etc/prometheus',
+ container_name = 'chainlaunch-prometheus',
+ scrape_interval = 15,
+ evaluation_interval = 15,
+ deployment_mode = 'docker',
+ docker_image = 'prom/prometheus:latest',
+ docker_network = 'chainlaunch-network',
+ docker_restart_policy = 'unless-stopped',
+ docker_extra_args = '--web.enable-lifecycle --web.enable-admin-api',
+ updated_at = CURRENT_TIMESTAMP
+WHERE id = 1
+RETURNING *;
+
+-- name: CreateAuditLog :one
+INSERT INTO audit_logs (
+ timestamp,
+ event_source,
+ user_identity,
+ source_ip,
+ event_type,
+ event_outcome,
+ affected_resource,
+ request_id,
+ severity,
+ details,
+ session_id
+) VALUES (
+ ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
+)
+RETURNING *;
+
+-- name: GetAuditLog :one
+SELECT * FROM audit_logs
+WHERE id = ? LIMIT 1;
+
+-- name: ListAuditLogs :many
+SELECT * FROM audit_logs
+WHERE (? IS NULL OR timestamp >= ?)
+ AND (? IS NULL OR timestamp <= ?)
+ AND (? = '' OR event_type = ?)
+ AND (? = 0 OR user_identity = ?)
+ORDER BY timestamp DESC
+LIMIT ? OFFSET ?;
+
+-- name: CountAuditLogs :one
+SELECT COUNT(*) FROM audit_logs
+WHERE (? IS NULL OR timestamp >= ?)
+  AND (? IS NULL OR timestamp <= ?)
+  AND (? = '' OR event_type = ?)
+  AND (? = 0 OR user_identity = ?);
diff --git a/pkg/db/queries.sql.go b/pkg/db/queries.sql.go
index a7cbab5..25ac62f 100644
--- a/pkg/db/queries.sql.go
+++ b/pkg/db/queries.sql.go
@@ -56,6 +56,41 @@ func (q *Queries) CheckNetworkNodeExists(ctx context.Context, arg *CheckNetworkN
return column_1, err
}
+const CountAuditLogs = `-- name: CountAuditLogs :one
+SELECT COUNT(*) FROM audit_logs
+WHERE (? IS NULL OR timestamp >= ?)
+  AND (? IS NULL OR timestamp <= ?)
+  AND (? = '' OR event_type = ?)
+  AND (? = 0 OR user_identity = ?)
+`
+
+type CountAuditLogsParams struct {
+	Column1      interface{} `json:"column1"`
+	Timestamp    time.Time   `json:"timestamp"`
+	Column3      interface{} `json:"column3"`
+	Timestamp_2  time.Time   `json:"timestamp2"`
+	Column5      interface{} `json:"column5"`
+	EventType    string      `json:"eventType"`
+	Column7      interface{} `json:"column7"`
+	UserIdentity int64       `json:"userIdentity"`
+}
+
+func (q *Queries) CountAuditLogs(ctx context.Context, arg *CountAuditLogsParams) (int64, error) {
+	row := q.db.QueryRowContext(ctx, CountAuditLogs,
+		arg.Column1,
+		arg.Timestamp,
+		arg.Column3,
+		arg.Timestamp_2,
+		arg.Column5,
+		arg.EventType,
+		arg.Column7,
+		arg.UserIdentity,
+	)
+	var count int64
+	err := row.Scan(&count)
+	return count, err
+}
+
const CountBackupsBySchedule = `-- name: CountBackupsBySchedule :one
SELECT COUNT(*) FROM backups
WHERE schedule_id = ?
@@ -137,6 +172,73 @@ func (q *Queries) CountUsers(ctx context.Context) (int64, error) {
return count, err
}
+const CreateAuditLog = `-- name: CreateAuditLog :one
+INSERT INTO audit_logs (
+ timestamp,
+ event_source,
+ user_identity,
+ source_ip,
+ event_type,
+ event_outcome,
+ affected_resource,
+ request_id,
+ severity,
+ details,
+ session_id
+) VALUES (
+ ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
+)
+RETURNING id, timestamp, event_source, user_identity, source_ip, event_type, event_outcome, affected_resource, request_id, severity, details, created_at, updated_at, session_id
+`
+
+type CreateAuditLogParams struct {
+ Timestamp time.Time `json:"timestamp"`
+ EventSource string `json:"eventSource"`
+ UserIdentity int64 `json:"userIdentity"`
+ SourceIp sql.NullString `json:"sourceIp"`
+ EventType string `json:"eventType"`
+ EventOutcome string `json:"eventOutcome"`
+ AffectedResource sql.NullString `json:"affectedResource"`
+ RequestID sql.NullString `json:"requestId"`
+ Severity sql.NullString `json:"severity"`
+ Details sql.NullString `json:"details"`
+ SessionID sql.NullString `json:"sessionId"`
+}
+
+func (q *Queries) CreateAuditLog(ctx context.Context, arg *CreateAuditLogParams) (*AuditLog, error) {
+ row := q.db.QueryRowContext(ctx, CreateAuditLog,
+ arg.Timestamp,
+ arg.EventSource,
+ arg.UserIdentity,
+ arg.SourceIp,
+ arg.EventType,
+ arg.EventOutcome,
+ arg.AffectedResource,
+ arg.RequestID,
+ arg.Severity,
+ arg.Details,
+ arg.SessionID,
+ )
+ var i AuditLog
+ err := row.Scan(
+ &i.ID,
+ &i.Timestamp,
+ &i.EventSource,
+ &i.UserIdentity,
+ &i.SourceIp,
+ &i.EventType,
+ &i.EventOutcome,
+ &i.AffectedResource,
+ &i.RequestID,
+ &i.Severity,
+ &i.Details,
+ &i.CreatedAt,
+ &i.UpdatedAt,
+ &i.SessionID,
+ )
+ return &i, err
+}
+
const CreateBackup = `-- name: CreateBackup :one
INSERT INTO backups (
schedule_id,
@@ -1393,6 +1495,33 @@ func (q *Queries) GetAllNodes(ctx context.Context) ([]*Node, error) {
return items, nil
}
+const GetAuditLog = `-- name: GetAuditLog :one
+SELECT id, timestamp, event_source, user_identity, source_ip, event_type, event_outcome, affected_resource, request_id, severity, details, created_at, updated_at, session_id FROM audit_logs
+WHERE id = ? LIMIT 1
+`
+
+func (q *Queries) GetAuditLog(ctx context.Context, id int64) (*AuditLog, error) {
+ row := q.db.QueryRowContext(ctx, GetAuditLog, id)
+ var i AuditLog
+ err := row.Scan(
+ &i.ID,
+ &i.Timestamp,
+ &i.EventSource,
+ &i.UserIdentity,
+ &i.SourceIp,
+ &i.EventType,
+ &i.EventOutcome,
+ &i.AffectedResource,
+ &i.RequestID,
+ &i.Severity,
+ &i.Details,
+ &i.CreatedAt,
+ &i.UpdatedAt,
+ &i.SessionID,
+ )
+ return &i, err
+}
+
const GetBackup = `-- name: GetBackup :one
SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups
WHERE id = ? LIMIT 1
@@ -2767,6 +2896,32 @@ func (q *Queries) GetPlugin(ctx context.Context, name string) (*Plugin, error) {
return &i, err
}
+const GetPrometheusConfig = `-- name: GetPrometheusConfig :one
+SELECT id, prometheus_port, data_dir, config_dir, container_name, scrape_interval, evaluation_interval, deployment_mode, docker_image, docker_network, docker_restart_policy, docker_extra_args, created_at, updated_at FROM prometheus_config WHERE id = 1
+`
+
+func (q *Queries) GetPrometheusConfig(ctx context.Context) (*PrometheusConfig, error) {
+ row := q.db.QueryRowContext(ctx, GetPrometheusConfig)
+ var i PrometheusConfig
+ err := row.Scan(
+ &i.ID,
+ &i.PrometheusPort,
+ &i.DataDir,
+ &i.ConfigDir,
+ &i.ContainerName,
+ &i.ScrapeInterval,
+ &i.EvaluationInterval,
+ &i.DeploymentMode,
+ &i.DockerImage,
+ &i.DockerNetwork,
+ &i.DockerRestartPolicy,
+ &i.DockerExtraArgs,
+ &i.CreatedAt,
+ &i.UpdatedAt,
+ )
+ return &i, err
+}
+
const GetProvidersByNotificationType = `-- name: GetProvidersByNotificationType :many
SELECT id, name, type, config, is_default, is_enabled, created_at, updated_at, notify_node_downtime, notify_backup_success, notify_backup_failure, notify_s3_connection_issue, last_test_at, last_test_status, last_test_message FROM notification_providers
WHERE (
@@ -3082,6 +3237,78 @@ func (q *Queries) GetUserByUsername(ctx context.Context, username string) (*User
return &i, err
}
+const ListAuditLogs = `-- name: ListAuditLogs :many
+SELECT id, timestamp, event_source, user_identity, source_ip, event_type, event_outcome, affected_resource, request_id, severity, details, created_at, updated_at, session_id FROM audit_logs
+WHERE (? IS NULL OR timestamp >= ?)
+ AND (? IS NULL OR timestamp <= ?)
+ AND (? = '' OR event_type = ?)
+ AND (? = 0 OR user_identity = ?)
+ORDER BY timestamp DESC
+LIMIT ? OFFSET ?
+`
+
+type ListAuditLogsParams struct {
+ Column1 interface{} `json:"column1"`
+ Timestamp time.Time `json:"timestamp"`
+ Column3 interface{} `json:"column3"`
+ Timestamp_2 time.Time `json:"timestamp2"`
+ Column5 interface{} `json:"column5"`
+ EventType string `json:"eventType"`
+ Column7 interface{} `json:"column7"`
+ UserIdentity int64 `json:"userIdentity"`
+ Limit int64 `json:"limit"`
+ Offset int64 `json:"offset"`
+}
+
+func (q *Queries) ListAuditLogs(ctx context.Context, arg *ListAuditLogsParams) ([]*AuditLog, error) {
+ rows, err := q.db.QueryContext(ctx, ListAuditLogs,
+ arg.Column1,
+ arg.Timestamp,
+ arg.Column3,
+ arg.Timestamp_2,
+ arg.Column5,
+ arg.EventType,
+ arg.Column7,
+ arg.UserIdentity,
+ arg.Limit,
+ arg.Offset,
+ )
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []*AuditLog{}
+ for rows.Next() {
+ var i AuditLog
+ if err := rows.Scan(
+ &i.ID,
+ &i.Timestamp,
+ &i.EventSource,
+ &i.UserIdentity,
+ &i.SourceIp,
+ &i.EventType,
+ &i.EventOutcome,
+ &i.AffectedResource,
+ &i.RequestID,
+ &i.Severity,
+ &i.Details,
+ &i.CreatedAt,
+ &i.UpdatedAt,
+ &i.SessionID,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, &i)
+ }
+ if err := rows.Close(); err != nil {
+ return nil, err
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
const ListBackupSchedules = `-- name: ListBackupSchedules :many
SELECT id, name, description, cron_expression, target_id, retention_days, enabled, created_at, updated_at, last_run_at, next_run_at FROM backup_schedules
ORDER BY created_at DESC
@@ -3347,8 +3574,14 @@ LEFT JOIN keys sk ON fo.sign_key_id = sk.id
LEFT JOIN keys tk ON fo.tls_root_key_id = tk.id
LEFT JOIN key_providers p ON fo.provider_id = p.id
ORDER BY fo.created_at DESC
+LIMIT ? OFFSET ?
`
+type ListFabricOrganizationsWithKeysParams struct {
+ Limit int64 `json:"limit"`
+ Offset int64 `json:"offset"`
+}
+
type ListFabricOrganizationsWithKeysRow struct {
ID int64 `json:"id"`
MspID string `json:"mspId"`
@@ -3373,8 +3606,8 @@ type ListFabricOrganizationsWithKeysRow struct {
ProviderName sql.NullString `json:"providerName"`
}
-func (q *Queries) ListFabricOrganizationsWithKeys(ctx context.Context) ([]*ListFabricOrganizationsWithKeysRow, error) {
- rows, err := q.db.QueryContext(ctx, ListFabricOrganizationsWithKeys)
+func (q *Queries) ListFabricOrganizationsWithKeys(ctx context.Context, arg *ListFabricOrganizationsWithKeysParams) ([]*ListFabricOrganizationsWithKeysRow, error) {
+ rows, err := q.db.QueryContext(ctx, ListFabricOrganizationsWithKeys, arg.Limit, arg.Offset)
if err != nil {
return nil, err
}
@@ -3661,10 +3894,56 @@ func (q *Queries) ListNetworks(ctx context.Context) ([]*Network, error) {
return items, nil
}
+const ListNetworksByPlatform = `-- name: ListNetworksByPlatform :many
+SELECT id, name, network_id, platform, status, description, config, deployment_config, exposed_ports, domain, created_at, created_by, updated_at, genesis_block_b64, current_config_block_b64 FROM networks
+WHERE
+ (CASE WHEN COALESCE(CAST(?1 AS TEXT), '') = '' THEN 1 ELSE platform = ?1 END)
+ORDER BY created_at DESC
+`
+
+// ListNetworksByPlatform lists networks newest-first. An empty platform
+// string matches every row — the CASE/COALESCE guard collapses the WHERE
+// clause to a constant true; any other value filters by exact platform
+// equality.
+func (q *Queries) ListNetworksByPlatform(ctx context.Context, platform string) ([]*Network, error) {
+ rows, err := q.db.QueryContext(ctx, ListNetworksByPlatform, platform)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []*Network{}
+ for rows.Next() {
+ var i Network
+ if err := rows.Scan(
+ &i.ID,
+ &i.Name,
+ &i.NetworkID,
+ &i.Platform,
+ &i.Status,
+ &i.Description,
+ &i.Config,
+ &i.DeploymentConfig,
+ &i.ExposedPorts,
+ &i.Domain,
+ &i.CreatedAt,
+ &i.CreatedBy,
+ &i.UpdatedAt,
+ &i.GenesisBlockB64,
+ &i.CurrentConfigBlockB64,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, &i)
+ }
+ if err := rows.Close(); err != nil {
+ return nil, err
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
const ListNodeEvents = `-- name: ListNodeEvents :many
SELECT id, node_id, event_type, description, data, status, created_at FROM node_events
WHERE node_id = ?
-ORDER BY created_at DESC
+ORDER BY id DESC
LIMIT ? OFFSET ?
`
@@ -4090,6 +4369,46 @@ func (q *Queries) MarkBackupNotified(ctx context.Context, id int64) error {
return err
}
+const ResetPrometheusConfig = `-- name: ResetPrometheusConfig :one
+UPDATE prometheus_config
+SET prometheus_port = 9090,
+ data_dir = '/var/lib/prometheus',
+ config_dir = '/etc/prometheus',
+ container_name = 'chainlaunch-prometheus',
+ scrape_interval = 15,
+ evaluation_interval = 15,
+ deployment_mode = 'docker',
+ docker_image = 'prom/prometheus:latest',
+ docker_network = 'chainlaunch-network',
+ docker_restart_policy = 'unless-stopped',
+ docker_extra_args = '--web.enable-lifecycle --web.enable-admin-api',
+ updated_at = CURRENT_TIMESTAMP
+WHERE id = 1
+RETURNING id, prometheus_port, data_dir, config_dir, container_name, scrape_interval, evaluation_interval, deployment_mode, docker_image, docker_network, docker_restart_policy, docker_extra_args, created_at, updated_at
+`
+
+// ResetPrometheusConfig restores the singleton prometheus_config row
+// (id = 1) to the hard-coded factory defaults above and returns the stored
+// row. NOTE(review): if the singleton row was never seeded the UPDATE
+// matches nothing and Scan returns sql.ErrNoRows — presumably migrations
+// insert the row; confirm.
+func (q *Queries) ResetPrometheusConfig(ctx context.Context) (*PrometheusConfig, error) {
+ row := q.db.QueryRowContext(ctx, ResetPrometheusConfig)
+ var i PrometheusConfig
+ err := row.Scan(
+ &i.ID,
+ &i.PrometheusPort,
+ &i.DataDir,
+ &i.ConfigDir,
+ &i.ContainerName,
+ &i.ScrapeInterval,
+ &i.EvaluationInterval,
+ &i.DeploymentMode,
+ &i.DockerImage,
+ &i.DockerNetwork,
+ &i.DockerRestartPolicy,
+ &i.DockerExtraArgs,
+ &i.CreatedAt,
+ &i.UpdatedAt,
+ )
+ return &i, err
+}
+
const UnsetDefaultNotificationProvider = `-- name: UnsetDefaultNotificationProvider :exec
UPDATE notification_providers
SET is_default = 0,
@@ -5130,6 +5449,72 @@ func (q *Queries) UpdatePlugin(ctx context.Context, arg *UpdatePluginParams) (*P
return &i, err
}
+const UpdatePrometheusConfig = `-- name: UpdatePrometheusConfig :one
+UPDATE prometheus_config
+SET prometheus_port = ?,
+ data_dir = ?,
+ config_dir = ?,
+ container_name = ?,
+ scrape_interval = ?,
+ evaluation_interval = ?,
+ deployment_mode = ?,
+ docker_image = ?,
+ docker_network = ?,
+ docker_restart_policy = ?,
+ docker_extra_args = ?,
+ updated_at = CURRENT_TIMESTAMP
+WHERE id = 1
+RETURNING id, prometheus_port, data_dir, config_dir, container_name, scrape_interval, evaluation_interval, deployment_mode, docker_image, docker_network, docker_restart_policy, docker_extra_args, created_at, updated_at
+`
+
+// UpdatePrometheusConfigParams covers every mutable prometheus_config
+// column. This is a whole-row overwrite, not a patch: zero-valued fields
+// still overwrite their columns. DockerNetwork and DockerExtraArgs are
+// nullable (sql.NullString).
+type UpdatePrometheusConfigParams struct {
+ PrometheusPort int64 `json:"prometheusPort"`
+ DataDir string `json:"dataDir"`
+ ConfigDir string `json:"configDir"`
+ ContainerName string `json:"containerName"`
+ ScrapeInterval int64 `json:"scrapeInterval"`
+ EvaluationInterval int64 `json:"evaluationInterval"`
+ DeploymentMode string `json:"deploymentMode"`
+ DockerImage string `json:"dockerImage"`
+ DockerNetwork sql.NullString `json:"dockerNetwork"`
+ DockerRestartPolicy string `json:"dockerRestartPolicy"`
+ DockerExtraArgs sql.NullString `json:"dockerExtraArgs"`
+}
+
+// UpdatePrometheusConfig rewrites the singleton prometheus_config row
+// (id = 1), stamps updated_at server-side, and returns the stored row.
+func (q *Queries) UpdatePrometheusConfig(ctx context.Context, arg *UpdatePrometheusConfigParams) (*PrometheusConfig, error) {
+ row := q.db.QueryRowContext(ctx, UpdatePrometheusConfig,
+ arg.PrometheusPort,
+ arg.DataDir,
+ arg.ConfigDir,
+ arg.ContainerName,
+ arg.ScrapeInterval,
+ arg.EvaluationInterval,
+ arg.DeploymentMode,
+ arg.DockerImage,
+ arg.DockerNetwork,
+ arg.DockerRestartPolicy,
+ arg.DockerExtraArgs,
+ )
+ var i PrometheusConfig
+ err := row.Scan(
+ &i.ID,
+ &i.PrometheusPort,
+ &i.DataDir,
+ &i.ConfigDir,
+ &i.ContainerName,
+ &i.ScrapeInterval,
+ &i.EvaluationInterval,
+ &i.DeploymentMode,
+ &i.DockerImage,
+ &i.DockerNetwork,
+ &i.DockerRestartPolicy,
+ &i.DockerExtraArgs,
+ &i.CreatedAt,
+ &i.UpdatedAt,
+ )
+ return &i, err
+}
+
const UpdateProviderTestResults = `-- name: UpdateProviderTestResults :one
UPDATE notification_providers
SET last_test_at = ?,
diff --git a/pkg/fabric/client/client.go b/pkg/fabric/client/client.go
index 39da6aa..d9e6f0a 100644
--- a/pkg/fabric/client/client.go
+++ b/pkg/fabric/client/client.go
@@ -24,6 +24,7 @@ type Client struct {
type Test = networktypes.AddNodeToNetworkRequest
// Use the shared types instead of local definitions
+type PaginatedOrganizationsResponse = orgtypes.PaginatedOrganizationsResponse
type Organization = orgtypes.OrganizationResponse
type CreateOrganizationRequest = orgtypes.CreateOrganizationRequest
type UpdateOrganizationRequest = orgtypes.UpdateOrganizationRequest
@@ -158,13 +159,13 @@ func (c *Client) DeleteOrganization(id int64) error {
}
// ListOrganizations retrieves all organizations
-func (c *Client) ListOrganizations() ([]Organization, error) {
+func (c *Client) ListOrganizations() (*PaginatedOrganizationsResponse, error) {
respBody, err := c.doRequest("GET", "/organizations", nil)
if err != nil {
return nil, err
}
- var orgs []Organization
+ orgs := &PaginatedOrganizationsResponse{}
if err := json.Unmarshal(respBody, &orgs); err != nil {
return nil, fmt.Errorf("failed to unmarshal response: %w", err)
}
@@ -179,4 +180,3 @@ func (c *Client) GetNetworkConfig(networkID int64, organizationID int64) ([]byte
}
return respBody, nil
}
-
diff --git a/pkg/fabric/handler/organization_handler.go b/pkg/fabric/handler/organization_handler.go
index 818a079..fe42ac2 100644
--- a/pkg/fabric/handler/organization_handler.go
+++ b/pkg/fabric/handler/organization_handler.go
@@ -7,6 +7,7 @@ import (
"math/big"
"net/http"
"strconv"
+ "strings"
"time"
"github.com/chainlaunch/chainlaunch/pkg/errors"
@@ -42,6 +43,22 @@ type DeleteRevokedCertificateRequest struct {
SerialNumber string `json:"serialNumber"` // Hex string of the serial number
}
+// PaginatedOrganizationsResponse represents a paginated list of organizations for HTTP response
+// swagger:model PaginatedOrganizationsResponse
+type PaginatedOrganizationsResponse struct {
+ Items []*OrganizationResponse `json:"items"`
+ Limit int64 `json:"limit"`
+ Offset int64 `json:"offset"`
+ Count int `json:"count"`
+}
+
+// ListOrganizationsQuery represents the query parameters for listing organizations
+// swagger:model ListOrganizationsQuery
+type ListOrganizationsQuery struct {
+ Limit int64 `form:"limit" json:"limit" query:"limit" example:"20"`
+ Offset int64 `form:"offset" json:"offset" query:"offset" example:"0"`
+}
+
// RegisterRoutes registers the organization routes
func (h *OrganizationHandler) RegisterRoutes(r chi.Router) {
r.Route("/organizations", func(r chi.Router) {
@@ -91,6 +108,12 @@ func (h *OrganizationHandler) CreateOrganization(w http.ResponseWriter, r *http.
org, err := h.service.CreateOrganization(r.Context(), params)
if err != nil {
+ if strings.Contains(err.Error(), "already exists") {
+ return errors.NewValidationError("organization already exists", map[string]interface{}{
+ "detail": err.Error(),
+ "code": "ORGANIZATION_ALREADY_EXISTS",
+ })
+ }
return errors.NewInternalError("failed to create organization", err, nil)
}
@@ -227,11 +250,43 @@ func (h *OrganizationHandler) DeleteOrganization(w http.ResponseWriter, r *http.
// @Tags Organizations
// @Accept json
// @Produce json
-// @Success 200 {array} OrganizationResponse
+// @Param limit query int false "Maximum number of organizations to return" default(20)
+// @Param offset query int false "Number of organizations to skip" default(0)
+// @Success 200 {object} PaginatedOrganizationsResponse
// @Failure 500 {object} map[string]string
// @Router /organizations [get]
func (h *OrganizationHandler) ListOrganizations(w http.ResponseWriter, r *http.Request) error {
- orgs, err := h.service.ListOrganizations(r.Context())
+ // Parse pagination query params
+ limitStr := r.URL.Query().Get("limit")
+ offsetStr := r.URL.Query().Get("offset")
+ var (
+ limit int64 = 20 // default limit
+ offset int64 = 0
+ err error
+ )
+ if limitStr != "" {
+ limit, err = strconv.ParseInt(limitStr, 10, 64)
+ if err != nil || limit <= 0 {
+ return errors.NewValidationError("invalid limit parameter", map[string]interface{}{
+ "detail": "limit must be a positive integer",
+ "code": "INVALID_LIMIT",
+ })
+ }
+ }
+ if offsetStr != "" {
+ offset, err = strconv.ParseInt(offsetStr, 10, 64)
+ if err != nil || offset < 0 {
+ return errors.NewValidationError("invalid offset parameter", map[string]interface{}{
+ "detail": "offset must be a non-negative integer",
+ "code": "INVALID_OFFSET",
+ })
+ }
+ }
+
+ orgs, err := h.service.ListOrganizations(r.Context(), service.PaginationParams{
+ Limit: limit,
+ Offset: offset,
+ })
if err != nil {
return errors.NewInternalError("failed to list organizations", err, nil)
}
@@ -241,7 +296,15 @@ func (h *OrganizationHandler) ListOrganizations(w http.ResponseWriter, r *http.R
orgResponses[i] = toOrganizationResponse(&org)
}
- return response.WriteJSON(w, http.StatusOK, orgResponses)
+ // Wrap the page in an envelope echoing the effective limit/offset;
+ // note Count is the size of this page, not the total row count.
+ resp := PaginatedOrganizationsResponse{
+ Items: orgResponses,
+ Limit: limit,
+ Offset: offset,
+ Count: len(orgResponses),
+ }
+
+ return response.WriteJSON(w, http.StatusOK, resp)
}
// @Summary Revoke a certificate using its serial number
diff --git a/pkg/fabric/handler/types.go b/pkg/fabric/handler/types.go
index 484377c..94d71af 100644
--- a/pkg/fabric/handler/types.go
+++ b/pkg/fabric/handler/types.go
@@ -31,11 +31,14 @@ type OrganizationResponse struct {
UpdatedAt time.Time `json:"updatedAt"`
ProviderID int64 `json:"providerId"`
ProviderName string `json:"providerName,omitempty"`
+ AdminTlsKeyID int64 `json:"adminTlsKeyId,omitempty"`
+ AdminSignKeyID int64 `json:"adminSignKeyId,omitempty"`
+ ClientSignKeyID int64 `json:"clientSignKeyId,omitempty"`
}
// Convert service DTO to HTTP response
func toOrganizationResponse(dto *service.OrganizationDTO) *OrganizationResponse {
- return &OrganizationResponse{
+ resp := &OrganizationResponse{
ID: dto.ID,
MspID: dto.MspID,
Description: dto.Description.String,
@@ -48,4 +51,16 @@ func toOrganizationResponse(dto *service.OrganizationDTO) *OrganizationResponse
ProviderID: dto.ProviderID,
ProviderName: dto.ProviderName,
}
+
+ if dto.AdminTlsKeyID.Valid {
+ resp.AdminTlsKeyID = dto.AdminTlsKeyID.Int64
+ }
+ if dto.AdminSignKeyID.Valid {
+ resp.AdminSignKeyID = dto.AdminSignKeyID.Int64
+ }
+ if dto.ClientSignKeyID.Valid {
+ resp.ClientSignKeyID = dto.ClientSignKeyID.Int64
+ }
+
+ return resp
}
diff --git a/pkg/fabric/networkconfig/parser.go b/pkg/fabric/networkconfig/parser.go
index 14e80a1..9117e05 100644
--- a/pkg/fabric/networkconfig/parser.go
+++ b/pkg/fabric/networkconfig/parser.go
@@ -7,6 +7,21 @@ import (
"gopkg.in/yaml.v3"
)
+// readPEMOrPath resolves certificate/key material supplied either inline
+// (pem) or via a filesystem path. Inline PEM takes precedence and is
+// returned verbatim; otherwise the file at path is read fully into memory.
+// Both arguments empty yields ("", nil) — callers treat that as "nothing
+// configured", not an error.
+func readPEMOrPath(pem, path string) (string, error) {
+ if pem != "" {
+ return pem, nil
+ }
+ if path != "" {
+ content, err := os.ReadFile(path)
+ if err != nil {
+ return "", err
+ }
+ return string(content), nil
+ }
+ return "", nil
+}
+
// LoadFromFile loads a network configuration from a YAML file
func LoadFromFile(path string) (*NetworkConfig, error) {
file, err := os.Open(path)
@@ -15,7 +30,74 @@ func LoadFromFile(path string) (*NetworkConfig, error) {
}
defer file.Close()
- return LoadFromReader(file)
+ config, err := LoadFromReader(file)
+ if err != nil {
+ return nil, err
+ }
+
+ // Materialize any path-referenced certificates and keys inline so
+ // downstream consumers only ever see PEM material.
+ for orgName, org := range config.Organizations {
+ for userName, user := range org.Users {
+ // Process user certificate
+ if user.Cert.PEM == "" && user.Cert.Path != "" {
+ certContent, err := readPEMOrPath(user.Cert.PEM, user.Cert.Path)
+ if err != nil {
+ return nil, err
+ }
+ user.Cert.PEM = certContent
+ }
+
+ // Process user key
+ if user.Key.PEM == "" && user.Key.Path != "" {
+ keyContent, err := readPEMOrPath(user.Key.PEM, user.Key.Path)
+ if err != nil {
+ return nil, err
+ }
+ user.Key.PEM = keyContent
+ }
+
+ org.Users[userName] = user
+ }
+ config.Organizations[orgName] = org
+ }
+
+ // Process peer TLS certificates
+ for peerName, peer := range config.Peers {
+ if peer.TLSCACerts.PEM == "" && peer.TLSCACerts.Path != "" {
+ certContent, err := readPEMOrPath(peer.TLSCACerts.PEM, peer.TLSCACerts.Path)
+ if err != nil {
+ return nil, err
+ }
+ peer.TLSCACerts.PEM = certContent
+ }
+ config.Peers[peerName] = peer
+ }
+
+ // Process orderer TLS certificates
+ for ordererName, orderer := range config.Orderers {
+ if orderer.TLSCACerts.PEM == "" && orderer.TLSCACerts.Path != "" {
+ certContent, err := readPEMOrPath(orderer.TLSCACerts.PEM, orderer.TLSCACerts.Path)
+ if err != nil {
+ return nil, err
+ }
+ orderer.TLSCACerts.PEM = certContent
+ }
+ config.Orderers[ordererName] = orderer
+ }
+
+ // Process CA TLS certificates
+ for caName, ca := range config.CertificateAuthorities {
+ if ca.TLSCACerts.PEM == "" && ca.TLSCACerts.Path != "" {
+ certContent, err := readPEMOrPath(ca.TLSCACerts.PEM, ca.TLSCACerts.Path)
+ if err != nil {
+ return nil, err
+ }
+ ca.TLSCACerts.PEM = certContent
+ }
+ config.CertificateAuthorities[caName] = ca
+ }
+
+ return config, nil
}
// LoadFromReader loads a network configuration from an io.Reader
diff --git a/pkg/fabric/networkconfig/types.go b/pkg/fabric/networkconfig/types.go
index 1d3e6d6..d0ae7a6 100644
--- a/pkg/fabric/networkconfig/types.go
+++ b/pkg/fabric/networkconfig/types.go
@@ -34,12 +34,14 @@ type User struct {
// UserCert represents a user's certificate
type UserCert struct {
- PEM string `yaml:"pem"`
+ PEM string `yaml:"pem,omitempty"`
+ Path string `yaml:"path,omitempty"`
}
// UserKey represents a user's private key
type UserKey struct {
- PEM string `yaml:"pem"`
+ PEM string `yaml:"pem,omitempty"`
+ Path string `yaml:"path,omitempty"`
}
// Orderer represents an orderer node
@@ -65,15 +67,16 @@ type GRPCOptions struct {
// TLSCACerts represents TLS CA certificates
type TLSCACerts struct {
- PEM string `yaml:"pem"`
+ PEM string `yaml:"pem,omitempty"`
+ Path string `yaml:"path,omitempty"`
}
// CertificateAuthority represents a CA server
type CertificateAuthority struct {
- URL string `yaml:"url"`
- Registrar Registrar `yaml:"registrar"`
- CAName string `yaml:"caName"`
- TLSCACerts []TLSCACerts `yaml:"tlsCACerts"`
+ URL string `yaml:"url"`
+ Registrar Registrar `yaml:"registrar"`
+ CAName string `yaml:"caName"`
+ TLSCACerts TLSCACerts `yaml:"tlsCACerts"`
}
// Registrar represents CA registrar information
diff --git a/pkg/fabric/service/organization_service.go b/pkg/fabric/service/organization_service.go
index 3110c50..e1234fb 100644
--- a/pkg/fabric/service/organization_service.go
+++ b/pkg/fabric/service/organization_service.go
@@ -64,6 +64,13 @@ type RevokedCertificateDTO struct {
Reason int64 `json:"reason"`
}
+// PaginationParams represents pagination input for listing organizations
+// NOTE: promote to a shared pagination package once another service needs the same contract.
+type PaginationParams struct {
+ Limit int64
+ Offset int64
+}
+
type OrganizationService struct {
queries *db.Queries
keyManagement *keymanagement.KeyManagementService
@@ -152,10 +159,18 @@ func toOrganizationListDTO(org *db.ListFabricOrganizationsWithKeysRow) *Organiza
UpdatedAt: org.UpdatedAt.Time,
ProviderID: org.ProviderID.Int64,
ProviderName: providerName,
+ AdminTlsKeyID: org.AdminTlsKeyID,
+ AdminSignKeyID: org.AdminSignKeyID,
+ ClientSignKeyID: org.ClientSignKeyID,
}
}
func (s *OrganizationService) CreateOrganization(ctx context.Context, params CreateOrganizationParams) (*OrganizationDTO, error) {
+ // Best-effort uniqueness check by MSP ID. Check-then-create is racy under
+ // concurrent requests; a UNIQUE constraint on msp_id remains the real guarantee.
+ if existing, _ := s.queries.GetFabricOrganizationByMSPID(ctx, params.MspID); existing != nil && existing.ID != 0 {
+ return nil, fmt.Errorf("organization with MSP ID '%s' already exists", params.MspID)
+ }
+
description := fmt.Sprintf("Sign key for organization %s", params.MspID)
curve := models.ECCurveP256
// Create SIGN key
@@ -438,8 +453,11 @@ func (s *OrganizationService) DeleteOrganization(ctx context.Context, id int64)
return nil
}
-func (s *OrganizationService) ListOrganizations(ctx context.Context) ([]OrganizationDTO, error) {
- orgs, err := s.queries.ListFabricOrganizationsWithKeys(ctx)
+func (s *OrganizationService) ListOrganizations(ctx context.Context, params PaginationParams) ([]OrganizationDTO, error) {
+ orgs, err := s.queries.ListFabricOrganizationsWithKeys(ctx, &db.ListFabricOrganizationsWithKeysParams{
+ Limit: params.Limit,
+ Offset: params.Offset,
+ })
if err != nil {
return nil, fmt.Errorf("failed to list organizations: %w", err)
}
diff --git a/pkg/http/annotations.go b/pkg/http/annotations.go
new file mode 100644
index 0000000..7341069
--- /dev/null
+++ b/pkg/http/annotations.go
@@ -0,0 +1,87 @@
+package http
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "strings"
+)
+
+// ResourceKey is the dedicated type for this package's context keys; using
+// a named type instead of a bare string prevents collisions with context
+// values stored by other packages.
+type ResourceKey string
+
+const (
+ // ResourceContextKey is the key used to store the resource in the context
+ ResourceContextKey ResourceKey = "resource"
+)
+
+// Resource represents an API resource
+type Resource struct {
+ // Type is the type of resource (e.g., "user", "project", "deployment")
+ Type string
+ // ID is the identifier of the resource (if applicable)
+ ID string
+ // Action is the action being performed on the resource (e.g., "create", "update", "delete")
+ Action string
+ // Body is the request body (if available)
+ Body []byte
+}
+
+// WithResource returns a copy of r whose context carries the given resource
+// annotation under ResourceContextKey.
+func WithResource(r *http.Request, resource Resource) *http.Request {
+ return r.WithContext(context.WithValue(r.Context(), ResourceContextKey, resource))
+}
+
+// ResourceFromContext retrieves the resource stored by WithResource. The
+// boolean is false when no middleware annotated this request.
+func ResourceFromContext(r *http.Request) (Resource, bool) {
+ resource, ok := r.Context().Value(ResourceContextKey).(Resource)
+ return resource, ok
+}
+
+// ResourceMiddleware creates a middleware that annotates each request's
+// context with a Resource: the given type, an ID pulled from the URL path,
+// an action inferred from the HTTP method, and — for mutating methods — a
+// copy of the request body (the body is re-attached so handlers still read it).
+func ResourceMiddleware(resourceType string) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Extract resource ID from path if it exists
+ // Assuming path format: /api/v1/{resourceType}/{id}
+ // NOTE(review): parts[4] hard-codes that mount prefix; mounting this
+ // router elsewhere silently yields an empty or wrong ID — confirm mounts.
+ parts := strings.Split(r.URL.Path, "/")
+ var resourceID string
+ if len(parts) >= 5 {
+ resourceID = parts[4]
+ }
+
+ // Determine action based on HTTP method
+ action := "view"
+ switch r.Method {
+ case http.MethodPost:
+ action = "create"
+ case http.MethodPut, http.MethodPatch:
+ action = "update"
+ case http.MethodDelete:
+ action = "delete"
+ }
+
+ // Get request body from Chi's context if available
+ var body []byte
+ if r.Method == http.MethodPost || r.Method == http.MethodPut || r.Method == http.MethodPatch {
+ if r.Body != nil {
+ // Read the body
+ // NOTE(review): the whole body is buffered in memory and the read
+ // error is discarded; consider http.MaxBytesReader upstream to cap
+ // large uploads.
+ body, _ = io.ReadAll(r.Body)
+ // Restore the body for the actual handler
+ r.Body = io.NopCloser(strings.NewReader(string(body)))
+ }
+ }
+
+ resource := Resource{
+ Type: resourceType,
+ ID: resourceID,
+ Action: action,
+ Body: body,
+ }
+
+ // Add resource to context
+ r = WithResource(r, resource)
+ next.ServeHTTP(w, r)
+ })
+ }
+}
diff --git a/pkg/http/response/response.go b/pkg/http/response/response.go
index 8ec5afa..1c7be42 100644
--- a/pkg/http/response/response.go
+++ b/pkg/http/response/response.go
@@ -7,16 +7,29 @@ import (
"github.com/chainlaunch/chainlaunch/pkg/errors"
)
+// Response represents a standard API response
type Response struct {
- Success bool `json:"success"`
- Data interface{} `json:"data,omitempty"`
- Error *ErrorResponse `json:"error,omitempty"`
+ Message string `json:"message,omitempty"`
+ Data interface{} `json:"data,omitempty"`
}
+// ErrorResponse represents an error response
type ErrorResponse struct {
- Type string `json:"type"`
- Message string `json:"message"`
- Details map[string]interface{} `json:"details,omitempty"`
+ Error string `json:"error"`
+}
+
+// JSON sends a JSON response
+func JSON(w http.ResponseWriter, status int, data interface{}) {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(status)
+ json.NewEncoder(w).Encode(data)
+}
+
+// Error sends an error response
+func Error(w http.ResponseWriter, status int, message string) {
+ JSON(w, status, ErrorResponse{
+ Error: message,
+ })
}
// Handler is a custom type for http handlers that can return errors
@@ -48,12 +61,8 @@ func WriteError(w http.ResponseWriter, err error) {
switch e := err.(type) {
case *errors.AppError:
response = Response{
- Success: false,
- Error: &ErrorResponse{
- Type: string(e.Type),
- Message: e.Message,
- Details: e.Details,
- },
+ Message: e.Message,
+ Data: e.Details,
}
// Map error types to HTTP status codes
@@ -75,11 +84,7 @@ func WriteError(w http.ResponseWriter, err error) {
}
default:
response = Response{
- Success: false,
- Error: &ErrorResponse{
- Type: string(errors.InternalError),
- Message: "An unexpected error occurred",
- },
+ Message: "An unexpected error occurred",
}
statusCode = http.StatusInternalServerError
}
diff --git a/pkg/metrics/client.go b/pkg/metrics/client.go
new file mode 100644
index 0000000..6fc78a5
--- /dev/null
+++ b/pkg/metrics/client.go
@@ -0,0 +1,211 @@
+package metrics
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/metrics/common"
+)
+
+// Client handles querying Prometheus for metrics
+type Client struct {
+ baseURL string
+ httpClient *http.Client
+}
+
+// NewClient creates a new metrics client
+func NewClient(baseURL string) *Client {
+ return &Client{
+ baseURL: baseURL,
+ httpClient: &http.Client{
+ Timeout: 10 * time.Second,
+ },
+ }
+}
+
+// Query executes an instant PromQL query via GET /api/v1/query and decodes
+// the JSON envelope into common.QueryResult. Note: the Prometheus HTTP API
+// encodes sample values as JSON strings (["<unix_ts>", "<value>"]), so
+// Value[1] in the result arrives as a string, not a float64.
+func (c *Client) Query(ctx context.Context, query string) (*common.QueryResult, error) {
+ u, err := url.Parse(c.baseURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse base URL: %w", err)
+ }
+
+ u.Path = "/api/v1/query"
+ q := u.Query()
+ q.Set("query", query)
+ u.RawQuery = q.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+ }
+
+ var result common.QueryResult
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return &result, nil
+}
+
+// QueryRange executes a range PromQL query via GET /api/v1/query_range.
+// start and end are sent as Unix seconds; step is sent in Go duration
+// notation (e.g. "30s"). Non-200 responses are surfaced as errors that
+// include the response body for debugging.
+func (c *Client) QueryRange(ctx context.Context, query string, start, end time.Time, step time.Duration) (*common.QueryResult, error) {
+ u, err := url.Parse(c.baseURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse base URL: %w", err)
+ }
+
+ u.Path = "/api/v1/query_range"
+ q := u.Query()
+ q.Set("query", query)
+ q.Set("start", fmt.Sprintf("%d", start.Unix()))
+ q.Set("end", fmt.Sprintf("%d", end.Unix()))
+ q.Set("step", step.String())
+ u.RawQuery = q.Encode()
+ queryUrl := u.String()
+ req, err := http.NewRequestWithContext(ctx, "GET", queryUrl, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ // Read the response body for error details
+ bodyBytes, readErr := io.ReadAll(resp.Body)
+ if readErr != nil {
+ return nil, fmt.Errorf("unexpected status code: %d (failed to read response body: %v)", resp.StatusCode, readErr)
+ }
+
+ // Reset the response body for potential further reads
+ // NOTE(review): this reassignment is dead code — the function returns on
+ // the next statement and nothing reads resp.Body again; safe to delete.
+ resp.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+
+ return nil, fmt.Errorf("unexpected status code: %d, response: %s", resp.StatusCode, string(bodyBytes))
+ }
+
+ var result common.QueryResult
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return &result, nil
+}
+
+// Common metric queries
+const (
+ // NodeCPUUsage returns CPU usage percentage for a node
+ NodeCPUUsage = `rate(node_cpu_seconds_total{mode="user"}[5m]) * 100`
+ // NodeMemoryUsage returns memory usage percentage for a node
+ NodeMemoryUsage = `(node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes * 100`
+ // NodeDiskUsage returns disk usage percentage for a node
+ NodeDiskUsage = `(node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"}) / node_filesystem_size_bytes{mountpoint="/"} * 100`
+ // NodeNetworkIO returns network I/O in bytes per second
+ NodeNetworkIO = `rate(node_network_receive_bytes_total[5m])`
+)
+
+// GetNodeMetrics returns common metrics for a specific node
+func (c *Client) GetNodeMetrics(ctx context.Context, nodeName string) (map[string]float64, error) {
+ metrics := make(map[string]float64)
+
+ // Query CPU usage
+ cpuResult, err := c.Query(ctx, fmt.Sprintf(`%s{instance="%s"}`, NodeCPUUsage, nodeName))
+ if err != nil {
+ return nil, fmt.Errorf("failed to query CPU usage: %w", err)
+ }
+ if len(cpuResult.Data.Result) > 0 {
+ if value, ok := cpuResult.Data.Result[0].Value[1].(float64); ok {
+ metrics["cpu_usage"] = value
+ }
+ }
+
+ // Query memory usage
+ memResult, err := c.Query(ctx, fmt.Sprintf(`%s{instance="%s"}`, NodeMemoryUsage, nodeName))
+ if err != nil {
+ return nil, fmt.Errorf("failed to query memory usage: %w", err)
+ }
+ if len(memResult.Data.Result) > 0 {
+ if value, ok := memResult.Data.Result[0].Value[1].(float64); ok {
+ metrics["memory_usage"] = value
+ }
+ }
+
+ // Query disk usage
+ diskResult, err := c.Query(ctx, fmt.Sprintf(`%s{instance="%s"}`, NodeDiskUsage, nodeName))
+ if err != nil {
+ return nil, fmt.Errorf("failed to query disk usage: %w", err)
+ }
+ if len(diskResult.Data.Result) > 0 {
+ if value, ok := diskResult.Data.Result[0].Value[1].(float64); ok {
+ metrics["disk_usage"] = value
+ }
+ }
+
+ return metrics, nil
+}
+
+// GetLabelValues retrieves all values for labelName via
+// GET /api/v1/label/{name}/values, optionally restricted by one or more
+// series selectors passed as repeated "match[]" query parameters.
+func (c *Client) GetLabelValues(ctx context.Context, labelName string, matches []string) ([]string, error) {
+ baseURL := fmt.Sprintf("%s/api/v1/label/%s/values", c.baseURL, labelName)
+
+ queryUrl := baseURL
+ if len(matches) > 0 {
+ var matchParams []string
+ for _, match := range matches {
+ // URL encode the match parameter to handle special characters
+ encodedMatch := url.QueryEscape(match)
+ matchParams = append(matchParams, "match[]="+encodedMatch)
+ }
+ queryUrl = baseURL + "?" + strings.Join(matchParams, "&")
+ }
+ req, err := http.NewRequestWithContext(ctx, "GET", queryUrl, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("prometheus API error: %s - %s", resp.Status, string(body))
+ }
+
+ // The envelope's "status" field is checked as well as the HTTP code,
+ // since Prometheus can return 200 with a non-success payload status.
+ var result struct {
+ Status string `json:"status"`
+ Data []string `json:"data"`
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ if result.Status != "success" {
+ return nil, fmt.Errorf("prometheus API returned non-success status: %s", result.Status)
+ }
+
+ return result.Data, nil
+}
diff --git a/pkg/metrics/common/common.go b/pkg/metrics/common/common.go
new file mode 100644
index 0000000..13fd9ae
--- /dev/null
+++ b/pkg/metrics/common/common.go
@@ -0,0 +1,91 @@
+package common
+
+import (
+ "context"
+ "time"
+)
+
+// Config represents the configuration for the metrics service
+type Config struct {
+ // PrometheusVersion is the version of Prometheus to deploy
+ PrometheusVersion string
+ // PrometheusPort is the port Prometheus will listen on
+ PrometheusPort int
+ // ScrapeInterval is the interval between scrapes
+ ScrapeInterval time.Duration
+ // DeploymentMode specifies how Prometheus is deployed (currently only supports "docker")
+ DeploymentMode string
+}
+
+// DefaultConfig returns a Config with sensible default values:
+// Prometheus v3.3.1 on port 9090, a 15-second scrape interval, and
+// docker deployment mode.
+// NOTE(review): keep these in sync with the DB-side defaults written by
+// the ResetPrometheusConfig query.
+func DefaultConfig() *Config {
+ return &Config{
+ PrometheusVersion: "v3.3.1",
+ PrometheusPort: 9090,
+ ScrapeInterval: 15 * time.Second,
+ DeploymentMode: "docker",
+ }
+}
+
+// Service defines the interface for metrics operations
+type Service interface {
+ // Start starts the Prometheus instance with the given configuration
+ Start(ctx context.Context, config *Config) error
+
+ // Stop stops the Prometheus instance
+ Stop(ctx context.Context) error
+
+ // QueryMetrics retrieves metrics for a specific node
+ QueryMetrics(ctx context.Context, nodeID int64, query string) (map[string]interface{}, error)
+
+ // QueryMetricsRange retrieves metrics for a specific node within a time range
+ QueryMetricsRange(ctx context.Context, nodeID int64, query string, start, end time.Time, step time.Duration) (map[string]interface{}, error)
+
+ // GetLabelValues retrieves values for a specific label
+ GetLabelValues(ctx context.Context, nodeID int64, labelName string, matches []string) ([]string, error)
+
+ // Reload reloads the Prometheus configuration
+ Reload(ctx context.Context) error
+
+ // Query executes a PromQL query for a specific node
+ Query(ctx context.Context, nodeID int64, query string) (*QueryResult, error)
+
+ // QueryRange executes a PromQL query with a time range for a specific node
+ QueryRange(ctx context.Context, nodeID int64, query string, start, end time.Time, step time.Duration) (*QueryResult, error)
+
+ // GetStatus returns the current status of the Prometheus instance
+ GetStatus(ctx context.Context) (*Status, error)
+}
+
+// QueryResult represents the result of a Prometheus query
+type QueryResult struct {
+ Status string `json:"status"`
+ Data struct {
+ ResultType string `json:"resultType"`
+ Result []struct {
+ Metric map[string]string `json:"metric"`
+ // For instant queries
+ Value []interface{} `json:"value,omitempty"`
+ // For range queries (matrix)
+ Values [][]interface{} `json:"values,omitempty"`
+ } `json:"result"`
+ } `json:"data"`
+}
+
+// Status represents the current status of the Prometheus instance
+type Status struct {
+ // Status is the current status of the Prometheus instance (e.g. "running", "stopped", "not_deployed")
+ Status string `json:"status"`
+ // Version is the version of Prometheus being used
+ Version string `json:"version,omitempty"`
+ // Port is the port Prometheus is listening on
+ Port int `json:"port,omitempty"`
+ // ScrapeInterval is the current scrape interval
+ ScrapeInterval time.Duration `json:"scrape_interval,omitempty"`
+ // DeploymentMode is the current deployment mode
+ DeploymentMode string `json:"deployment_mode,omitempty"`
+ // StartedAt is when the instance was started
+ StartedAt *time.Time `json:"started_at,omitempty"`
+ // Error is any error that occurred while getting the status
+ Error string `json:"error,omitempty"`
+}
diff --git a/pkg/metrics/handler.go b/pkg/metrics/handler.go
new file mode 100644
index 0000000..2fb1951
--- /dev/null
+++ b/pkg/metrics/handler.go
@@ -0,0 +1,389 @@
+package metrics
+
+import (
+ "encoding/json"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/logger"
+ "github.com/chainlaunch/chainlaunch/pkg/metrics/common"
+ "github.com/go-chi/chi/v5"
+)
+
+// Node represents a node to be monitored.
+// NOTE(review): this type is not referenced anywhere else in this file —
+// confirm whether external callers use it before removing.
+type Node struct {
+ // ID identifies the node.
+ ID string
+ // Address is where the node can be reached.
+ Address string
+ // Port is the node's TCP port.
+ Port int
+}
+
+// Handler handles HTTP requests for metrics. All business logic is delegated
+// to the injected common.Service; the handler only parses/validates requests
+// and serializes responses.
+type Handler struct {
+ // service performs the actual metrics operations.
+ service common.Service
+ // logger receives diagnostics for failed operations.
+ logger *logger.Logger
+}
+
+// NewHandler constructs a metrics Handler that delegates all work to the
+// given service and reports failures through the given logger.
+func NewHandler(service common.Service, logger *logger.Logger) *Handler {
+ return &Handler{service: service, logger: logger}
+}
+
+// RegisterRoutes registers the metrics routes.
+//
+// Routes mounted under /api/v1/metrics:
+//   POST /deploy                         — deploy a Prometheus instance
+//   GET  /node/{id}                      — instant metrics for one node
+//   POST /reload                         — reload the Prometheus config
+//   GET  /node/{id}/label/{label}/values — values for one label
+//   GET  /node/{id}/range                — range query for one node
+//   POST /node/{id}/query                — custom PromQL query
+//   GET  /status                         — Prometheus instance status
+func (h *Handler) RegisterRoutes(r chi.Router) {
+ r.Route("/api/v1/metrics", func(r chi.Router) {
+  r.Post("/deploy", h.DeployPrometheus)
+  r.Get("/node/{id}", h.GetNodeMetrics)
+  r.Post("/reload", h.ReloadConfiguration)
+  r.Get("/node/{id}/label/{label}/values", h.GetLabelValues)
+  r.Get("/node/{id}/range", h.GetNodeMetricsRange)
+  r.Post("/node/{id}/query", h.CustomQuery)
+  r.Get("/status", h.GetStatus)
+ })
+}
+
+// DeployPrometheusRequest represents the request to deploy Prometheus.
+// NOTE(review): the `binding:"required"` tags are Gin-style and are NOT
+// enforced here — the handler decodes with encoding/json, which ignores them.
+type DeployPrometheusRequest struct {
+ // PrometheusVersion is the prom/prometheus image tag to deploy.
+ PrometheusVersion string `json:"prometheus_version" binding:"required"`
+ // PrometheusPort is the host port to expose Prometheus on.
+ PrometheusPort int `json:"prometheus_port" binding:"required"`
+ // ScrapeInterval is the scrape interval in seconds.
+ ScrapeInterval int `json:"scrape_interval" binding:"required"`
+ // DeploymentMode selects the deployment backend (e.g. "docker").
+ DeploymentMode string `json:"deployment_mode" binding:"required"`
+}
+
+// DeployPrometheus deploys a new Prometheus instance
+// @Summary Deploy a new Prometheus instance
+// @Description Deploys a new Prometheus instance with the specified configuration
+// @Tags metrics
+// @Accept json
+// @Produce json
+// @Param request body DeployPrometheusRequest true "Prometheus deployment configuration"
+// @Success 200 {object} map[string]string
+// @Failure 400 {object} map[string]string
+// @Failure 500 {object} map[string]string
+// @Router /api/v1/metrics/deploy [post]
+func (h *Handler) DeployPrometheus(w http.ResponseWriter, r *http.Request) {
+ var req DeployPrometheusRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+  http.Error(w, err.Error(), http.StatusBadRequest)
+  return
+ }
+
+ // The `binding:"required"` tags are not enforced by encoding/json, so the
+ // required fields are validated explicitly here.
+ if req.PrometheusVersion == "" {
+  http.Error(w, "prometheus_version is required", http.StatusBadRequest)
+  return
+ }
+ if req.PrometheusPort <= 0 || req.PrometheusPort > 65535 {
+  http.Error(w, "prometheus_port must be a valid TCP port", http.StatusBadRequest)
+  return
+ }
+ if req.ScrapeInterval <= 0 {
+  http.Error(w, "scrape_interval must be a positive number of seconds", http.StatusBadRequest)
+  return
+ }
+
+ // ScrapeInterval is expressed in seconds on the wire.
+ config := &common.Config{
+  PrometheusVersion: req.PrometheusVersion,
+  PrometheusPort:    req.PrometheusPort,
+  ScrapeInterval:    time.Duration(req.ScrapeInterval) * time.Second,
+  DeploymentMode:    req.DeploymentMode,
+ }
+
+ if err := h.service.Start(r.Context(), config); err != nil {
+  h.logger.Error("Failed to deploy Prometheus", "error", err)
+  http.Error(w, "Failed to deploy Prometheus", http.StatusInternalServerError)
+  return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(map[string]string{"message": "Prometheus deployed successfully"})
+}
+
+// RefreshNodesRequest represents the request to refresh nodes.
+// NOTE(review): no handler in this file consumes this type and no
+// corresponding route is registered — confirm whether it is dead code.
+type RefreshNodesRequest struct {
+ // Nodes lists the nodes to refresh; the binding tags are not enforced by
+ // encoding/json (see DeployPrometheus).
+ Nodes []struct {
+  ID      string `json:"id" binding:"required"`
+  Address string `json:"address" binding:"required"`
+  Port    int    `json:"port" binding:"required"`
+ } `json:"nodes" binding:"required"`
+}
+
+// GetNodeMetrics retrieves metrics for a specific node
+// @Summary Get metrics for a specific node
+// @Description Retrieves metrics for a specific node by ID and optional PromQL query
+// @Tags metrics
+// @Produce json
+// @Param id path string true "Node ID"
+// @Param query query string false "PromQL query to filter metrics"
+// @Success 200 {object} map[string]interface{}
+// @Failure 400 {object} map[string]string
+// @Failure 500 {object} map[string]string
+// @Router /api/v1/metrics/node/{id} [get]
+func (h *Handler) GetNodeMetrics(w http.ResponseWriter, r *http.Request) {
+ rawID := chi.URLParam(r, "id")
+ if rawID == "" {
+  http.Error(w, "Node ID is required", http.StatusBadRequest)
+  return
+ }
+ id, err := strconv.ParseInt(rawID, 10, 64)
+ if err != nil {
+  http.Error(w, "invalid node ID", http.StatusBadRequest)
+  return
+ }
+
+ // The optional PromQL query comes from the ?query= parameter.
+ promQL := r.URL.Query().Get("query")
+
+ metrics, err := h.service.QueryMetrics(r.Context(), id, promQL)
+ if err != nil {
+  h.logger.Error("Failed to get node metrics", "error", err)
+  http.Error(w, "Failed to get node metrics", http.StatusInternalServerError)
+  return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(metrics)
+}
+
+// ReloadConfiguration reloads the Prometheus configuration
+// @Summary Reload Prometheus configuration
+// @Description Triggers a reload of the Prometheus configuration to pick up any changes
+// @Tags metrics
+// @Produce json
+// @Success 200 {object} map[string]string
+// @Failure 500 {object} map[string]string
+// @Router /api/v1/metrics/reload [post]
+func (h *Handler) ReloadConfiguration(w http.ResponseWriter, r *http.Request) {
+ if err := h.service.Reload(r.Context()); err != nil {
+  h.logger.Error("Failed to reload Prometheus configuration", "error", err)
+  http.Error(w, "Failed to reload Prometheus configuration", http.StatusInternalServerError)
+  return
+ }
+
+ response := map[string]string{"message": "Prometheus configuration reloaded successfully"}
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(response)
+}
+
+// GetLabelValues retrieves values for one label, scoped to a node.
+// @Summary Get label values for a specific label
+// @Description Retrieves all values for a specific label, optionally filtered by metric matches and node ID
+// @Tags metrics
+// @Accept json
+// @Produce json
+// @Param id path string true "Node ID"
+// @Param label path string true "Label name"
+// @Param match query array false "Metric matches (e.g. {__name__=\"metric_name\"})"
+// @Success 200 {object} map[string]interface{} "Label values"
+// @Failure 400 {object} map[string]interface{} "Bad request"
+// @Failure 500 {object} map[string]interface{} "Internal server error"
+// @Router /api/v1/metrics/node/{id}/label/{label}/values [get]
+func (h *Handler) GetLabelValues(w http.ResponseWriter, r *http.Request) {
+ nodeID := chi.URLParam(r, "id")
+ if nodeID == "" {
+  http.Error(w, "node ID is required", http.StatusBadRequest)
+  return
+ }
+
+ nodeIDInt, err := strconv.ParseInt(nodeID, 10, 64)
+ if err != nil {
+  http.Error(w, "invalid node ID", http.StatusBadRequest)
+  return
+ }
+
+ labelName := chi.URLParam(r, "label")
+ if labelName == "" {
+  http.Error(w, "label name is required", http.StatusBadRequest)
+  return
+ }
+
+ // Optional repeated series selectors, e.g. ?match={__name__="up"}.
+ matches := r.URL.Query()["match"]
+
+ values, err := h.service.GetLabelValues(r.Context(), nodeIDInt, labelName, matches)
+ if err != nil {
+  // Log the detailed error and return a generic message — consistent with
+  // the other handlers — instead of leaking internals to the client.
+  h.logger.Error("Failed to get label values", "error", err)
+  http.Error(w, "Failed to get label values", http.StatusInternalServerError)
+  return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(map[string]interface{}{
+  "status": "success",
+  "data":   values,
+ })
+}
+
+// GetNodeMetricsRange runs a PromQL range query for one node.
+// @Summary Get metrics for a specific node with time range
+// @Description Retrieves metrics for a specific node within a specified time range
+// @Tags metrics
+// @Accept json
+// @Produce json
+// @Param id path string true "Node ID"
+// @Param query query string true "PromQL query"
+// @Param start query string true "Start time (RFC3339 format)"
+// @Param end query string true "End time (RFC3339 format)"
+// @Param step query string true "Step duration (e.g. 1m, 5m, 1h)"
+// @Success 200 {object} map[string]interface{} "Metrics data"
+// @Failure 400 {object} map[string]interface{} "Bad request"
+// @Failure 500 {object} map[string]interface{} "Internal server error"
+// @Router /api/v1/metrics/node/{id}/range [get]
+func (h *Handler) GetNodeMetricsRange(w http.ResponseWriter, r *http.Request) {
+ nodeID := chi.URLParam(r, "id")
+ if nodeID == "" {
+  http.Error(w, "node ID is required", http.StatusBadRequest)
+  return
+ }
+
+ nodeIDInt, err := strconv.ParseInt(nodeID, 10, 64)
+ if err != nil {
+  http.Error(w, "invalid node ID", http.StatusBadRequest)
+  return
+ }
+
+ query := r.URL.Query().Get("query")
+ if query == "" {
+  http.Error(w, "query is required", http.StatusBadRequest)
+  return
+ }
+
+ startStr := r.URL.Query().Get("start")
+ if startStr == "" {
+  http.Error(w, "start time is required", http.StatusBadRequest)
+  return
+ }
+ start, err := time.Parse(time.RFC3339, startStr)
+ if err != nil {
+  http.Error(w, "invalid start time format (use RFC3339)", http.StatusBadRequest)
+  return
+ }
+
+ endStr := r.URL.Query().Get("end")
+ if endStr == "" {
+  http.Error(w, "end time is required", http.StatusBadRequest)
+  return
+ }
+ end, err := time.Parse(time.RFC3339, endStr)
+ if err != nil {
+  http.Error(w, "invalid end time format (use RFC3339)", http.StatusBadRequest)
+  return
+ }
+
+ stepStr := r.URL.Query().Get("step")
+ if stepStr == "" {
+  http.Error(w, "step is required", http.StatusBadRequest)
+  return
+ }
+ step, err := time.ParseDuration(stepStr)
+ if err != nil {
+  http.Error(w, "invalid step duration", http.StatusBadRequest)
+  return
+ }
+ // ParseDuration accepts zero and negative durations (e.g. "0s", "-1m"),
+ // which Prometheus rejects — fail fast here instead.
+ if step <= 0 {
+  http.Error(w, "step must be a positive duration", http.StatusBadRequest)
+  return
+ }
+
+ // Validate time range
+ if end.Before(start) {
+  http.Error(w, "end time must be after start time", http.StatusBadRequest)
+  return
+ }
+
+ metrics, err := h.service.QueryMetricsRange(r.Context(), nodeIDInt, query, start, end, step)
+ if err != nil {
+  h.logger.Error("Failed to get node metrics range", "error", err)
+  http.Error(w, err.Error(), http.StatusInternalServerError)
+  return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(map[string]interface{}{
+  "status": "success",
+  "data":   metrics,
+ })
+}
+
+// CustomQueryRequest represents the request body for custom Prometheus queries.
+// When both Start and End are set the query is executed as a range query;
+// otherwise it is executed as an instant query.
+type CustomQueryRequest struct {
+ // Query is the PromQL expression to execute. The binding tag is not
+ // enforced by encoding/json (see DeployPrometheusRequest).
+ Query string `json:"query" binding:"required"`
+ // Start of the optional time range.
+ Start *time.Time `json:"start,omitempty"`
+ // End of the optional time range.
+ End *time.Time `json:"end,omitempty"`
+ // Step is the optional range-query resolution (time.ParseDuration format).
+ Step *string `json:"step,omitempty"`
+}
+
+// CustomQuery executes a custom Prometheus query
+// @Summary Execute custom Prometheus query
+// @Description Execute a custom Prometheus query with optional time range
+// @Tags metrics
+// @Accept json
+// @Produce json
+// @Param id path string true "Node ID"
+// @Param request body CustomQueryRequest true "Query parameters"
+// @Success 200 {object} common.QueryResult
+// @Failure 400 {object} map[string]string
+// @Failure 500 {object} map[string]string
+// @Router /api/v1/metrics/node/{id}/query [post]
+func (h *Handler) CustomQuery(w http.ResponseWriter, r *http.Request) {
+ nodeID := chi.URLParam(r, "id")
+ if nodeID == "" {
+  http.Error(w, "Node ID is required", http.StatusBadRequest)
+  return
+ }
+ nodeIDInt, err := strconv.ParseInt(nodeID, 10, 64)
+ if err != nil {
+  http.Error(w, "Invalid node ID", http.StatusBadRequest)
+  return
+ }
+
+ var req CustomQueryRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+  http.Error(w, "Invalid request body: "+err.Error(), http.StatusBadRequest)
+  return
+ }
+ // The binding tag is not enforced by encoding/json, so check explicitly.
+ if req.Query == "" {
+  http.Error(w, "query is required", http.StatusBadRequest)
+  return
+ }
+ // Reject a half-specified time range instead of silently ignoring it.
+ if (req.Start == nil) != (req.End == nil) {
+  http.Error(w, "start and end must be provided together", http.StatusBadRequest)
+  return
+ }
+
+ // If time range parameters are provided, use QueryRange
+ if req.Start != nil && req.End != nil {
+  step := 1 * time.Minute // Default step
+  if req.Step != nil {
+   parsed, err := time.ParseDuration(*req.Step)
+   if err != nil {
+    http.Error(w, "Invalid step duration: "+err.Error(), http.StatusBadRequest)
+    return
+   }
+   if parsed <= 0 {
+    http.Error(w, "Invalid step duration: must be positive", http.StatusBadRequest)
+    return
+   }
+   step = parsed
+  }
+
+  result, err := h.service.QueryRange(r.Context(), nodeIDInt, req.Query, *req.Start, *req.End, step)
+  if err != nil {
+   h.logger.Error("Failed to execute range query", "error", err)
+   http.Error(w, "Failed to execute range query: "+err.Error(), http.StatusInternalServerError)
+   return
+  }
+  w.Header().Set("Content-Type", "application/json")
+  w.WriteHeader(http.StatusOK)
+  json.NewEncoder(w).Encode(result)
+  return
+ }
+
+ // Otherwise use regular Query
+ result, err := h.service.Query(r.Context(), nodeIDInt, req.Query)
+ if err != nil {
+  h.logger.Error("Failed to execute query", "error", err)
+  http.Error(w, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
+  return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(result)
+}
+
+// GetStatus returns the current status of the Prometheus instance
+// @Summary Get Prometheus status
+// @Description Returns the current status of the Prometheus instance including version, port, and configuration
+// @Tags metrics
+// @Produce json
+// @Success 200 {object} common.Status
+// @Failure 500 {object} map[string]string
+// @Router /api/v1/metrics/status [get]
+func (h *Handler) GetStatus(w http.ResponseWriter, r *http.Request) {
+ status, err := h.service.GetStatus(r.Context())
+ if err == nil {
+  w.Header().Set("Content-Type", "application/json")
+  w.WriteHeader(http.StatusOK)
+  json.NewEncoder(w).Encode(status)
+  return
+ }
+
+ h.logger.Error("Failed to get Prometheus status", "error", err)
+ http.Error(w, "Failed to get Prometheus status", http.StatusInternalServerError)
+}
diff --git a/pkg/metrics/prometheus.go b/pkg/metrics/prometheus.go
new file mode 100644
index 0000000..5915fcf
--- /dev/null
+++ b/pkg/metrics/prometheus.go
@@ -0,0 +1,730 @@
+package metrics
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ "github.com/chainlaunch/chainlaunch/pkg/metrics/common"
+ nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/volume"
+ "github.com/docker/docker/client"
+ "github.com/docker/go-connections/nat"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "gopkg.in/yaml.v2"
+)
+
+// slugify converts a string to a URL-friendly slug
+func slugify(s string) string {
+ // Convert to lowercase
+ s = strings.ToLower(s)
+
+ // Replace spaces and special characters with hyphens
+ reg := regexp.MustCompile(`[^a-z0-9]+`)
+ s = reg.ReplaceAllString(s, "-")
+
+ // Remove leading and trailing hyphens
+ s = strings.Trim(s, "-")
+
+ return s
+}
+
+// PrometheusDeployer defines the interface for different Prometheus deployment methods
+type PrometheusDeployer interface {
+ // Start starts the Prometheus instance
+ Start(ctx context.Context) error
+ // Stop stops the Prometheus instance
+ Stop(ctx context.Context) error
+ // Reload reloads the Prometheus configuration
+ Reload(ctx context.Context) error
+ // GetStatus returns the current status of the Prometheus instance as a
+ // plain string (the Docker implementation returns the container state,
+ // e.g. "running" or "exited").
+ GetStatus(ctx context.Context) (string, error)
+}
+
+// DockerPrometheusDeployer implements PrometheusDeployer for Docker deployment
+type DockerPrometheusDeployer struct {
+ // config holds the deployment settings (version, port, scrape interval).
+ config *common.Config
+ // client is the Docker API client used for all container operations.
+ client *client.Client
+ // db provides access to persisted Prometheus configuration.
+ db *db.Queries
+ // nodeService is queried for the nodes Prometheus should scrape.
+ nodeService *nodeservice.NodeService
+}
+
+// NewDockerPrometheusDeployer creates a new Docker-based Prometheus deployer.
+// The docker client is configured from the environment (DOCKER_HOST etc.)
+// with automatic API-version negotiation.
+func NewDockerPrometheusDeployer(config *common.Config, db *db.Queries, nodeService *nodeservice.NodeService) (*DockerPrometheusDeployer, error) {
+ cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+ if err != nil {
+  return nil, fmt.Errorf("failed to create docker client: %w", err)
+ }
+
+ deployer := &DockerPrometheusDeployer{
+  config:      config,
+  client:      cli,
+  db:          db,
+  nodeService: nodeService,
+ }
+ return deployer, nil
+}
+
+// Start provisions and launches the Prometheus container: it creates the data
+// and config volumes, pulls the requested image, starts the container with
+// the lifecycle API enabled, writes the initial configuration and finally
+// triggers a Reload to pick up the current node targets.
+func (d *DockerPrometheusDeployer) Start(ctx context.Context) error {
+ containerName := "chainlaunch-prometheus"
+
+ // Named volumes for TSDB data and configuration; VolumeCreate is idempotent.
+ for _, volName := range []string{"chainlaunch-prometheus-data", "chainlaunch-prometheus-config"} {
+  if _, err := d.client.VolumeCreate(ctx, volume.CreateOptions{Name: volName}); err != nil {
+   return fmt.Errorf("failed to create volume %s: %w", volName, err)
+  }
+ }
+
+ // Generate prometheus.yml
+ configData, err := d.generateConfig()
+ if err != nil {
+  return fmt.Errorf("failed to generate Prometheus config: %w", err)
+ }
+
+ // Pull the image. ImagePull is asynchronous: the pull only completes once
+ // the returned reader is drained, so consume it before creating the
+ // container (otherwise ContainerCreate may race the pull).
+ imageName := fmt.Sprintf("prom/prometheus:%s", d.config.PrometheusVersion)
+ pullResp, err := d.client.ImagePull(ctx, imageName, image.PullOptions{})
+ if err != nil {
+  return fmt.Errorf("failed to pull Prometheus image: %w", err)
+ }
+ if _, err := io.Copy(io.Discard, pullResp); err != nil {
+  pullResp.Close()
+  return fmt.Errorf("failed to pull Prometheus image: %w", err)
+ }
+ pullResp.Close()
+
+ // Prometheus listens on 9090 inside the container regardless of the
+ // configured host port, so expose the container-side 9090 and map it to
+ // the configured host port.
+ containerPort := nat.Port("9090/tcp")
+ containerConfig := &container.Config{
+  Image: imageName,
+  Cmd: []string{
+   "--config.file=/etc/prometheus/prometheus.yml",
+   "--storage.tsdb.path=/prometheus",
+   "--web.console.libraries=/usr/share/prometheus/console_libraries",
+   "--web.console.templates=/usr/share/prometheus/consoles",
+   "--web.enable-lifecycle",
+   "--web.enable-admin-api",
+  },
+  ExposedPorts: nat.PortSet{containerPort: struct{}{}},
+ }
+
+ hostConfig := &container.HostConfig{
+  PortBindings: nat.PortMap{
+   containerPort: []nat.PortBinding{
+    {
+     HostIP:   "0.0.0.0",
+     HostPort: fmt.Sprintf("%d", d.config.PrometheusPort),
+    },
+   },
+  },
+  Mounts: []mount.Mount{
+   {Type: mount.TypeVolume, Source: "chainlaunch-prometheus-data", Target: "/prometheus"},
+   {Type: mount.TypeVolume, Source: "chainlaunch-prometheus-config", Target: "/etc/prometheus"},
+  },
+  RestartPolicy: container.RestartPolicy{
+   Name: "unless-stopped",
+  },
+  // Lets the in-container Prometheus reach services on the host.
+  ExtraHosts: []string{"host.docker.internal:host-gateway"},
+ }
+
+ resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, &v1.Platform{}, containerName)
+ if err != nil {
+  return fmt.Errorf("failed to create container: %w", err)
+ }
+
+ if err := d.client.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
+  return fmt.Errorf("failed to start container: %w", err)
+ }
+
+ // Crude readiness wait before exec'ing into the container.
+ time.Sleep(2 * time.Second)
+
+ // Write the initial config. A quoted heredoc is used instead of echo '...'
+ // because the generated YAML contains single quotes, which would terminate
+ // the shell quoting. The exec must also be started explicitly —
+ // ContainerExecCreate alone does not run the command.
+ configPath := "/etc/prometheus/prometheus.yml"
+ writeScript := fmt.Sprintf("cat > %s <<'CHAINLAUNCH_EOF'\n%s\nCHAINLAUNCH_EOF", configPath, configData)
+ execResp, err := d.client.ContainerExecCreate(ctx, containerName, container.ExecOptions{
+  Cmd: []string{"sh", "-c", writeScript},
+ })
+ if err != nil {
+  return fmt.Errorf("failed to create config file: %w", err)
+ }
+ if err := d.client.ContainerExecStart(ctx, execResp.ID, container.ExecStartOptions{}); err != nil {
+  return fmt.Errorf("failed to write config file: %w", err)
+ }
+
+ // Reload regenerates the config with the full node target set.
+ return d.Reload(ctx)
+}
+
+// Stop stops the Prometheus container
+func (d *DockerPrometheusDeployer) Stop(ctx context.Context) error {
+ containerName := "chainlaunch-prometheus"
+
+ // Stop container
+ if err := d.client.ContainerStop(ctx, containerName, container.StopOptions{}); err != nil {
+ return fmt.Errorf("failed to stop container: %w", err)
+ }
+
+ // Remove container
+ if err := d.client.ContainerRemove(ctx, containerName, container.RemoveOptions{
+ Force: true,
+ }); err != nil {
+ return fmt.Errorf("failed to remove container: %w", err)
+ }
+
+ return nil
+}
+
+// PrometheusConfig represents the Prometheus configuration structure
+// (the subset of prometheus.yml this package reads and writes).
+type PrometheusConfig struct {
+ Global GlobalConfig `yaml:"global"`
+ ScrapeConfigs []ScrapeConfig `yaml:"scrape_configs"`
+}
+
+// GlobalConfig represents the global Prometheus configuration
+type GlobalConfig struct {
+ // ScrapeInterval is a Prometheus duration string (e.g. "15s").
+ ScrapeInterval string `yaml:"scrape_interval"`
+}
+
+// ScrapeConfig represents a Prometheus scrape configuration
+type ScrapeConfig struct {
+ // JobName is the unique scrape job identifier.
+ JobName string `yaml:"job_name"`
+ StaticConfigs []StaticConfig `yaml:"static_configs"`
+}
+
+// StaticConfig represents a static target configuration
+type StaticConfig struct {
+ // Targets are host:port scrape endpoints.
+ Targets []string `yaml:"targets"`
+}
+
+// PeerNode represents a scrape target derived from a node. Despite the name,
+// it is also used for orderer and Besu nodes (see getOrdererNodes and
+// getBesuNodes).
+type PeerNode struct {
+ // ID is the node's database ID formatted as a decimal string.
+ ID string
+ // Name is the node's display name.
+ Name string
+ // OperationAddress is the host:port Prometheus should scrape.
+ OperationAddress string
+}
+
+// getPeerNodes retrieves peer nodes from the database
+func (d *DockerPrometheusDeployer) getPeerNodes(ctx context.Context) ([]PeerNode, error) {
+ // Get peer nodes from database
+ nodes, err := d.nodeService.GetAllNodes(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get peer nodes: %w", err)
+ }
+
+ peerNodes := make([]PeerNode, 0)
+ for _, node := range nodes.Items {
+ if node.FabricPeer == nil {
+ continue
+ }
+ operationAddress := node.FabricPeer.OperationsAddress
+ if operationAddress == "" {
+ operationAddress = node.FabricPeer.ExternalEndpoint
+ }
+
+ // Extract port from operations address
+ var port string
+ if parts := strings.Split(operationAddress, ":"); len(parts) > 1 {
+ port = parts[len(parts)-1]
+ } else {
+ port = "9443" // Default operations port if not specified
+ }
+
+ // Use host.docker.internal to access host machine from container
+ formattedAddress := fmt.Sprintf("host.docker.internal:%s", port)
+
+ peerNodes = append(peerNodes, PeerNode{
+ ID: strconv.FormatInt(node.ID, 10),
+ Name: node.Name,
+ OperationAddress: formattedAddress,
+ })
+ }
+
+ return peerNodes, nil
+}
+
+// getOrdererNodes retrieves orderer nodes from the database
+func (d *DockerPrometheusDeployer) getOrdererNodes(ctx context.Context) ([]PeerNode, error) {
+ // Get all nodes from database
+ nodes, err := d.nodeService.GetAllNodes(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get nodes: %w", err)
+ }
+
+ ordererNodes := make([]PeerNode, 0)
+ for _, node := range nodes.Items {
+ if node.FabricOrderer == nil {
+ continue
+ }
+
+ operationAddress := node.FabricOrderer.OperationsAddress
+ if operationAddress == "" {
+ operationAddress = node.FabricOrderer.ExternalEndpoint
+ }
+
+ // Extract port from operations address
+ var port string
+ if parts := strings.Split(operationAddress, ":"); len(parts) > 1 {
+ port = parts[len(parts)-1]
+ } else {
+ port = "9443" // Default operations port if not specified
+ }
+
+ // Use host.docker.internal to access host machine from container
+ formattedAddress := fmt.Sprintf("host.docker.internal:%s", port)
+
+ ordererNodes = append(ordererNodes, PeerNode{
+ ID: strconv.FormatInt(node.ID, 10),
+ Name: node.Name,
+ OperationAddress: formattedAddress,
+ })
+ }
+
+ return ordererNodes, nil
+}
+
+// getBesuNodes retrieves Besu nodes from the database that have metrics
+// enabled and converts each one into a scrape target.
+func (d *DockerPrometheusDeployer) getBesuNodes(ctx context.Context) ([]PeerNode, error) {
+ nodes, err := d.nodeService.GetAllNodes(ctx)
+ if err != nil {
+  return nil, fmt.Errorf("failed to get nodes: %w", err)
+ }
+
+ besuNodes := make([]PeerNode, 0)
+ for _, node := range nodes.Items {
+  // Skip nodes that are not Besu nodes or don't have metrics enabled.
+  if node.BesuNode == nil || !node.BesuNode.MetricsEnabled {
+   continue
+  }
+
+  // A wildcard or empty bind host is only reachable via the host gateway
+  // alias from inside the container.
+  metricsHost := node.BesuNode.MetricsHost
+  if metricsHost == "" || metricsHost == "0.0.0.0" {
+   metricsHost = "host.docker.internal"
+  }
+
+  // Compare the port as an int instead of formatting it first and
+  // comparing the string "0".
+  metricsPort := node.BesuNode.MetricsPort
+  if metricsPort == 0 {
+   metricsPort = 9545 // default Besu metrics port
+  }
+
+  besuNodes = append(besuNodes, PeerNode{
+   ID:               strconv.FormatInt(node.ID, 10),
+   Name:             node.Name,
+   OperationAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
+  })
+ }
+
+ return besuNodes, nil
+}
+
+// GetStatus returns the Docker state string of the Prometheus container
+// (e.g. "running", "exited"), or "not_deployed" when the container does not
+// exist — consistent with PrometheusManager.GetStatus, which treats a missing
+// container as a valid state rather than an error.
+func (d *DockerPrometheusDeployer) GetStatus(ctx context.Context) (string, error) {
+ containerName := "chainlaunch-prometheus"
+
+ // Named `info` rather than `container` so the docker `container` package
+ // import is not shadowed.
+ info, err := d.client.ContainerInspect(ctx, containerName)
+ if err != nil {
+  if client.IsErrNotFound(err) {
+   return "not_deployed", nil
+  }
+  return "", fmt.Errorf("failed to inspect container: %w", err)
+ }
+
+ return info.State.Status, nil
+}
+
+// generateConfig generates the Prometheus configuration file content.
+//
+// The rendered config only contains the self-scrape job; node targets are
+// added later by Reload. {{ .ScrapeInterval }} renders the time.Duration via
+// its String() form (e.g. "15s"), which is a valid Prometheus duration.
+func (d *DockerPrometheusDeployer) generateConfig() (string, error) {
+ tmpl := `global:
+ scrape_interval: {{ .ScrapeInterval }}
+
+scrape_configs:
+ - job_name: 'prometheus'
+ static_configs:
+ - targets: ['localhost:9090']
+`
+
+ t, err := template.New("prometheus").Parse(tmpl)
+ if err != nil {
+  return "", fmt.Errorf("failed to parse template: %w", err)
+ }
+
+ var buf bytes.Buffer
+ if err := t.Execute(&buf, d.config); err != nil {
+  return "", fmt.Errorf("failed to execute template: %w", err)
+ }
+
+ return buf.String(), nil
+}
+
+// PrometheusManager handles the lifecycle of a Prometheus instance
+type PrometheusManager struct {
+ // deployer starts/stops/reloads the actual Prometheus process.
+ deployer PrometheusDeployer
+ // client talks to the Prometheus HTTP API for queries
+ // (Client is defined elsewhere in this package).
+ client *Client
+}
+
+// NewPrometheusManager creates a new PrometheusManager
+func NewPrometheusManager(config *common.Config, db *db.Queries, nodeService *nodeservice.NodeService) (*PrometheusManager, error) {
+ var deployer PrometheusDeployer
+ var err error
+
+ switch config.DeploymentMode {
+ case "docker":
+ deployer, err = NewDockerPrometheusDeployer(config, db, nodeService)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create docker deployer: %w", err)
+ }
+ default:
+ deployer, err = NewDockerPrometheusDeployer(config, db, nodeService)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create docker deployer: %w", err)
+ }
+ }
+
+ // Create Prometheus client
+ client := NewClient(fmt.Sprintf("http://localhost:%d", config.PrometheusPort))
+
+ return &PrometheusManager{
+ deployer: deployer,
+ client: client,
+ }, nil
+}
+
+// Reload regenerates the Prometheus scrape configuration from the current set
+// of Fabric peer, Fabric orderer and Besu nodes, writes it into the running
+// container and asks Prometheus to reload it via its lifecycle endpoint.
+func (d *DockerPrometheusDeployer) Reload(ctx context.Context) error {
+ containerName := "chainlaunch-prometheus"
+
+ // Collect every node kind that exposes a metrics endpoint.
+ peerNodes, err := d.getPeerNodes(ctx)
+ if err != nil {
+  return fmt.Errorf("failed to get peer nodes: %w", err)
+ }
+ ordererNodes, err := d.getOrdererNodes(ctx)
+ if err != nil {
+  return fmt.Errorf("failed to get orderer nodes: %w", err)
+ }
+ besuNodes, err := d.getBesuNodes(ctx)
+ if err != nil {
+  return fmt.Errorf("failed to get Besu nodes: %w", err)
+ }
+
+ // Base config: Prometheus scraping itself on its in-container port.
+ config := &PrometheusConfig{
+  Global: GlobalConfig{
+   ScrapeInterval: d.config.ScrapeInterval.String(),
+  },
+  ScrapeConfigs: []ScrapeConfig{
+   {
+    JobName:       "prometheus",
+    StaticConfigs: []StaticConfig{{Targets: []string{"localhost:9090"}}},
+   },
+  },
+ }
+
+ // All node kinds share the same scrape-config shape, so add them in one pass
+ // instead of three identical loops.
+ targets := make([]PeerNode, 0, len(peerNodes)+len(ordererNodes)+len(besuNodes))
+ targets = append(targets, peerNodes...)
+ targets = append(targets, ordererNodes...)
+ targets = append(targets, besuNodes...)
+ for _, node := range targets {
+  config.ScrapeConfigs = append(config.ScrapeConfigs, ScrapeConfig{
+   JobName:       slugify(fmt.Sprintf("%s-%s", node.ID, node.Name)),
+   StaticConfigs: []StaticConfig{{Targets: []string{node.OperationAddress}}},
+  })
+ }
+
+ configData, err := yaml.Marshal(config)
+ if err != nil {
+  return fmt.Errorf("failed to marshal config: %w", err)
+ }
+
+ // Write the config inside the container exactly once (previously this exec
+ // was created twice and the first instance was never started). A quoted
+ // heredoc is used instead of echo '...' because the YAML may contain single
+ // quotes, which would terminate the shell quoting and corrupt the file.
+ configPath := "/etc/prometheus/prometheus.yml"
+ writeScript := fmt.Sprintf("cat > %s <<'CHAINLAUNCH_EOF'\n%s\nCHAINLAUNCH_EOF", configPath, string(configData))
+ execID, err := d.client.ContainerExecCreate(ctx, containerName, container.ExecOptions{
+  Cmd: []string{"sh", "-c", writeScript},
+ })
+ if err != nil {
+  return fmt.Errorf("failed to create exec command: %w", err)
+ }
+ if err := d.client.ContainerExecStart(ctx, execID.ID, container.ExecStartOptions{}); err != nil {
+  return fmt.Errorf("failed to start exec command: %w", err)
+ }
+
+ // Ask Prometheus to reload; --web.enable-lifecycle is passed at deploy time.
+ reloadExecID, err := d.client.ContainerExecCreate(ctx, containerName, container.ExecOptions{
+  Cmd: []string{"wget", "-q", "--post-data", "reload", "http://localhost:9090/-/reload"},
+ })
+ if err != nil {
+  return fmt.Errorf("failed to create reload command: %w", err)
+ }
+ if err := d.client.ContainerExecStart(ctx, reloadExecID.ID, container.ExecStartOptions{}); err != nil {
+  return fmt.Errorf("failed to start reload command: %w", err)
+ }
+
+ return nil
+}
+
+// Start starts the Prometheus instance by delegating to the configured
+// deployer.
+func (pm *PrometheusManager) Start(ctx context.Context) error {
+ return pm.deployer.Start(ctx)
+}
+
+// Stop stops the Prometheus instance by delegating to the configured
+// deployer.
+func (pm *PrometheusManager) Stop(ctx context.Context) error {
+ return pm.deployer.Stop(ctx)
+}
+
+// AddTarget adds a new scrape job with the given targets to the Prometheus
+// configuration and triggers a reload.
+//
+// NOTE(review): this reads/writes /etc/prometheus/prometheus.yml on the HOST
+// filesystem, while the Docker deployer keeps the config inside the
+// chainlaunch-prometheus-config volume — confirm these refer to the same file
+// in the intended deployment.
+func (pm *PrometheusManager) AddTarget(ctx context.Context, jobName string, targets []string) error {
+ configPath := "/etc/prometheus/prometheus.yml"
+ configData, err := os.ReadFile(configPath)
+ if err != nil {
+  return fmt.Errorf("failed to read config file: %w", err)
+ }
+
+ // Reuse the package's PrometheusConfig types instead of redeclaring
+ // anonymous structs with identical yaml tags.
+ var config PrometheusConfig
+ if err := yaml.Unmarshal(configData, &config); err != nil {
+  return fmt.Errorf("failed to parse config: %w", err)
+ }
+
+ // Append the new job with a single static target group.
+ config.ScrapeConfigs = append(config.ScrapeConfigs, ScrapeConfig{
+  JobName:       jobName,
+  StaticConfigs: []StaticConfig{{Targets: targets}},
+ })
+
+ newConfigData, err := yaml.Marshal(config)
+ if err != nil {
+  return fmt.Errorf("failed to marshal config: %w", err)
+ }
+
+ if err := os.WriteFile(configPath, newConfigData, 0644); err != nil {
+  return fmt.Errorf("failed to write config file: %w", err)
+ }
+
+ // Reload Prometheus configuration
+ return pm.deployer.Reload(ctx)
+}
+
+// RemoveTarget removes the scrape job with the given name from the Prometheus
+// configuration and triggers a reload. Removing a job that does not exist is
+// not an error.
+//
+// NOTE(review): like AddTarget, this operates on the host path
+// /etc/prometheus/prometheus.yml while the Docker deployer stores the config
+// in a volume — confirm the paths actually coincide.
+func (pm *PrometheusManager) RemoveTarget(ctx context.Context, jobName string) error {
+ configPath := "/etc/prometheus/prometheus.yml"
+ configData, err := os.ReadFile(configPath)
+ if err != nil {
+  return fmt.Errorf("failed to read config file: %w", err)
+ }
+
+ // Reuse the package's PrometheusConfig types instead of redeclaring
+ // anonymous structs with identical yaml tags.
+ var config PrometheusConfig
+ if err := yaml.Unmarshal(configData, &config); err != nil {
+  return fmt.Errorf("failed to parse config: %w", err)
+ }
+
+ // Keep every scrape config except the one matching jobName.
+ kept := make([]ScrapeConfig, 0, len(config.ScrapeConfigs))
+ for _, sc := range config.ScrapeConfigs {
+  if sc.JobName != jobName {
+   kept = append(kept, sc)
+  }
+ }
+ config.ScrapeConfigs = kept
+
+ newConfigData, err := yaml.Marshal(config)
+ if err != nil {
+  return fmt.Errorf("failed to marshal config: %w", err)
+ }
+
+ if err := os.WriteFile(configPath, newConfigData, 0644); err != nil {
+  return fmt.Errorf("failed to write config file: %w", err)
+ }
+
+ // Reload Prometheus configuration
+ return pm.deployer.Reload(ctx)
+}
+
+// Query executes a PromQL query against Prometheus (instant query) by
+// delegating to the HTTP client.
+func (pm *PrometheusManager) Query(ctx context.Context, query string) (*common.QueryResult, error) {
+ return pm.client.Query(ctx, query)
+}
+
+// QueryRange executes a PromQL query with a time range by delegating to the
+// HTTP client.
+func (pm *PrometheusManager) QueryRange(ctx context.Context, query string, start, end time.Time, step time.Duration) (*common.QueryResult, error) {
+ return pm.client.QueryRange(ctx, query, start, end, step)
+}
+
+// GetLabelValues retrieves values for a specific label, optionally filtered
+// by series selectors, by delegating to the HTTP client.
+func (pm *PrometheusManager) GetLabelValues(ctx context.Context, labelName string, matches []string) ([]string, error) {
+ return pm.client.GetLabelValues(ctx, labelName, matches)
+}
+
+// GetStatus returns the current status of the Prometheus instance, combining
+// the live Docker container state with the persisted configuration. A missing
+// container yields status "not_deployed" without an error.
+func (pm *PrometheusManager) GetStatus(ctx context.Context) (*common.Status, error) {
+ status := &common.Status{
+  Status: "not_deployed",
+ }
+
+ containerName := "chainlaunch-prometheus"
+ dockerDeployer, ok := pm.deployer.(*DockerPrometheusDeployer)
+ if !ok {
+  return nil, fmt.Errorf("deployer is not a DockerPrometheusDeployer")
+ }
+
+ // Named `info` rather than `container` so the docker `container` package
+ // import is not shadowed.
+ info, err := dockerDeployer.client.ContainerInspect(ctx, containerName)
+ if err != nil {
+  if client.IsErrNotFound(err) {
+   // No container means Prometheus was never deployed; not an error.
+   return status, nil
+  }
+  return nil, fmt.Errorf("failed to inspect container: %w", err)
+ }
+
+ // Container exists; report its Docker state and start time.
+ status.Status = info.State.Status
+ startedAt, err := time.Parse(time.RFC3339, info.State.StartedAt)
+ if err != nil {
+  status.Error = fmt.Sprintf("failed to parse start time: %v", err)
+ } else {
+  status.StartedAt = &startedAt
+ }
+
+ // Configuration details are best-effort: a lookup failure is reported in
+ // status.Error rather than failing the whole call.
+ config, err := dockerDeployer.db.GetPrometheusConfig(ctx)
+ if err != nil {
+  status.Error = fmt.Sprintf("failed to get configuration: %v", err)
+  return status, nil
+ }
+
+ // Assumes the image is always "prom/prometheus:<version>"; for any other
+ // image TrimPrefix leaves the full image string in Version.
+ status.Version = strings.TrimPrefix(config.DockerImage, "prom/prometheus:")
+ status.Port = int(config.PrometheusPort)
+ status.ScrapeInterval = time.Duration(config.ScrapeInterval) * time.Second
+ status.DeploymentMode = config.DeploymentMode
+
+ return status, nil
+}
+// Reload regenerates and reloads the Prometheus configuration via the
+// underlying deployer.
+func (pm *PrometheusManager) Reload(ctx context.Context) error {
+ return pm.deployer.Reload(ctx)
+}
diff --git a/pkg/metrics/service.go b/pkg/metrics/service.go
new file mode 100644
index 0000000..562dd15
--- /dev/null
+++ b/pkg/metrics/service.go
@@ -0,0 +1,160 @@
+package metrics
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ "github.com/chainlaunch/chainlaunch/pkg/metrics/common"
+ nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service"
+)
+
+// service implements the Service interface
+type service struct {
+	manager *PrometheusManager // owns the Prometheus lifecycle and query client
+	nodeService *nodeservice.NodeService // resolves node IDs to names for job scoping
+}
+
+// NewService creates a new metrics service backed by a Prometheus manager
+// built from the given config, database handle and node service.
+func NewService(config *common.Config, db *db.Queries, nodeService *nodeservice.NodeService) (common.Service, error) {
+	promManager, err := NewPrometheusManager(config, db, nodeService)
+	if err != nil {
+		return nil, err
+	}
+	svc := &service{
+		manager:     promManager,
+		nodeService: nodeService,
+	}
+	return svc, nil
+}
+
+// Start starts the Prometheus instance.
+// NOTE(review): the config argument is currently ignored; the manager runs
+// with the configuration it was constructed with — confirm this is intended.
+func (s *service) Start(ctx context.Context, config *common.Config) error {
+	return s.manager.Start(ctx)
+}
+
+// Stop stops the Prometheus instance by delegating to the manager.
+func (s *service) Stop(ctx context.Context) error {
+	return s.manager.Stop(ctx)
+}
+
+// QueryMetrics retrieves metrics for a specific node.
+//
+// The node ID is resolved to its Prometheus job name ("<id>-<name>",
+// slugified). An empty query selects every series for that job; otherwise
+// the query is scoped to the job unless it already carries a job matcher.
+// The result is returned wrapped in a map carrying the node ID, job name,
+// effective query and raw query result.
+func (s *service) QueryMetrics(ctx context.Context, nodeID int64, query string) (map[string]interface{}, error) {
+	// Get node type and create job name
+	node, err := s.nodeService.GetNodeByID(ctx, nodeID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get node: %w", err)
+	}
+	jobName := slugify(fmt.Sprintf("%d-%s", node.ID, node.Name))
+
+	if query == "" {
+		// No query given: select all series for this node's job.
+		query = fmt.Sprintf(`{job="%s"}`, jobName)
+	} else if !strings.Contains(query, "job=") {
+		// Scope the metric to this node's job, but do not append a second
+		// job matcher if the caller already supplied one (previously this
+		// was appended unconditionally, yielding invalid PromQL; the guard
+		// matches QueryMetricsRange and Query).
+		query = fmt.Sprintf(`%s{job="%s"}`, query, jobName)
+	}
+
+	// Query Prometheus for metrics
+	result, err := s.manager.Query(ctx, query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to query metrics: %w", err)
+	}
+
+	return map[string]interface{}{
+		"node_id": nodeID,
+		"job":     jobName,
+		"query":   query,
+		"result":  result,
+	}, nil
+}
+
+// QueryMetricsRange retrieves metrics for a specific node within a time
+// range, scoping the query to the node's Prometheus job when needed.
+func (s *service) QueryMetricsRange(ctx context.Context, nodeID int64, query string, start, end time.Time, step time.Duration) (map[string]interface{}, error) {
+	node, err := s.nodeService.GetNodeByID(ctx, nodeID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get node: %w", err)
+	}
+	job := slugify(fmt.Sprintf("%d-%s", node.ID, node.Name))
+
+	// Scope the query to this node's job unless a job matcher is present.
+	if !strings.Contains(query, "job=") {
+		query = fmt.Sprintf(`%s{job="%s"}`, query, job)
+	}
+
+	rangeResult, err := s.manager.QueryRange(ctx, query, start, end, step)
+	if err != nil {
+		return nil, fmt.Errorf("failed to query metrics range: %w", err)
+	}
+
+	response := map[string]interface{}{
+		"node_id": nodeID,
+		"job":     job,
+		"query":   query,
+		"result":  rangeResult,
+	}
+	return response, nil
+}
+
+// GetLabelValues retrieves values for a specific label.
+//
+// The node lookup only validates that the node exists before querying
+// Prometheus; the label query itself is not scoped to the node's job
+// (matches are passed through unchanged). The previous version computed a
+// job matcher — with a job-name format inconsistent with every other
+// method — and then discarded it; that dead code is removed.
+func (s *service) GetLabelValues(ctx context.Context, nodeID int64, labelName string, matches []string) ([]string, error) {
+	// Validate that the node exists.
+	if _, err := s.nodeService.GetNodeByID(ctx, nodeID); err != nil {
+		return nil, fmt.Errorf("failed to get node: %w", err)
+	}
+
+	result, err := s.manager.GetLabelValues(ctx, labelName, matches)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get label values: %w", err)
+	}
+	return result, nil
+}
+
+// Reload reloads the Prometheus configuration by delegating to the manager.
+func (s *service) Reload(ctx context.Context) error {
+	return s.manager.Reload(ctx)
+}
+
+// Query executes a PromQL query for a specific node, scoping it to the
+// node's Prometheus job unless the query already carries a job matcher.
+func (s *service) Query(ctx context.Context, nodeID int64, query string) (*common.QueryResult, error) {
+	node, err := s.nodeService.GetNodeByID(ctx, nodeID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get node: %w", err)
+	}
+	job := slugify(fmt.Sprintf("%d-%s", node.ID, node.Name))
+
+	// Scope the query to this node's job unless a job matcher is present.
+	if !strings.Contains(query, "job=") {
+		query = fmt.Sprintf(`%s{job="%s"}`, query, job)
+	}
+
+	return s.manager.Query(ctx, query)
+}
+
+// QueryRange executes a PromQL query with a time range for a specific node.
+//
+// The query may contain the literal placeholder "{jobName}", every
+// occurrence of which is replaced with the node's Prometheus job name
+// ("<id>-<name>", slugified).
+func (s *service) QueryRange(ctx context.Context, nodeID int64, query string, start, end time.Time, step time.Duration) (*common.QueryResult, error) {
+	node, err := s.nodeService.GetNodeByID(ctx, nodeID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get node: %w", err)
+	}
+	jobName := slugify(fmt.Sprintf("%d-%s", node.ID, node.Name))
+
+	// Substitute every occurrence of the job-name placeholder. The previous
+	// code tested for the unterminated prefix "{jobName" and replaced only
+	// the first "{jobName}" occurrence.
+	query = strings.ReplaceAll(query, "{jobName}", jobName)
+
+	return s.manager.QueryRange(ctx, query, start, end, step)
+}
+
+// GetStatus returns the current status of the Prometheus instance
+// (deployment state, version, port, scrape interval) via the manager.
+func (s *service) GetStatus(ctx context.Context) (*common.Status, error) {
+	return s.manager.GetStatus(ctx)
+}
diff --git a/pkg/networks/http/handler.go b/pkg/networks/http/handler.go
index 467d0e4..b2ed484 100644
--- a/pkg/networks/http/handler.go
+++ b/pkg/networks/http/handler.go
@@ -12,6 +12,7 @@ import (
"encoding/base64"
+ httpchainlaunch "github.com/chainlaunch/chainlaunch/pkg/http"
"github.com/chainlaunch/chainlaunch/pkg/networks/service"
"github.com/chainlaunch/chainlaunch/pkg/networks/service/fabric"
"github.com/chainlaunch/chainlaunch/pkg/networks/service/types"
@@ -37,7 +38,11 @@ func NewHandler(networkService *service.NetworkService, nodeService *nodeservice
// RegisterRoutes registers the network routes
func (h *Handler) RegisterRoutes(r chi.Router) {
+ // Fabric network routes with resource middleware
r.Route("/networks/fabric", func(r chi.Router) {
+ // Add resource middleware for all Fabric network routes
+ r.Use(httpchainlaunch.ResourceMiddleware("fabric_network"))
+
r.Get("/", h.FabricNetworkList)
r.Post("/", h.FabricNetworkCreate)
r.Delete("/{id}", h.FabricNetworkDelete)
@@ -66,15 +71,17 @@ func (h *Handler) RegisterRoutes(r chi.Router) {
r.Post("/{id}/organization-crl", h.UpdateOrganizationCRL)
})
- // New Besu routes
+ // Besu network routes with resource middleware
r.Route("/networks/besu", func(r chi.Router) {
+ // Add resource middleware for all Besu network routes
+ r.Use(httpchainlaunch.ResourceMiddleware("besu_network"))
+
r.Get("/", h.BesuNetworkList)
r.Post("/", h.BesuNetworkCreate)
r.Post("/import", h.ImportBesuNetwork)
r.Get("/{id}", h.BesuNetworkGet)
r.Delete("/{id}", h.BesuNetworkDelete)
})
-
}
// @Summary List Fabric networks
@@ -394,7 +401,7 @@ func (h *Handler) FabricNetworkGetChannelConfig(w http.ResponseWriter, r *http.R
writeError(w, http.StatusInternalServerError, "get_network_failed", err.Error())
return
}
- config, err := h.networkService.GetChannelConfig(networkID)
+ config, err := h.networkService.GetFabricChannelConfig(networkID)
if err != nil {
writeError(w, http.StatusInternalServerError, "get_config_failed", err.Error())
return
@@ -427,7 +434,7 @@ func (h *Handler) FabricNetworkGetCurrentChannelConfig(w http.ResponseWriter, r
writeError(w, http.StatusInternalServerError, "get_network_failed", err.Error())
return
}
- config, err := h.networkService.GetCurrentChannelConfig(networkID)
+ config, err := h.networkService.GetFabricCurrentChannelConfig(networkID)
if err != nil {
writeError(w, http.StatusInternalServerError, "get_current_config_failed", err.Error())
return
@@ -1047,7 +1054,7 @@ func (h *Handler) ReloadNetworkBlock(w http.ResponseWriter, r *http.Request) {
}
// Call service method to reload block
- err = h.networkService.ReloadNetworkBlock(r.Context(), networkIDInt)
+ err = h.networkService.ReloadFabricNetworkBlock(r.Context(), networkIDInt)
if err != nil {
// Handle different types of errors
if err.Error() == "network not found" {
@@ -1143,7 +1150,7 @@ func (h *Handler) ImportFabricNetworkWithOrg(w http.ResponseWriter, r *http.Requ
return
}
- result, err := h.networkService.ImportNetworkWithOrg(r.Context(), service.ImportNetworkWithOrgParams{
+ result, err := h.networkService.ImportFabricNetworkWithOrg(r.Context(), service.ImportNetworkWithOrgParams{
ChannelID: req.ChannelID,
OrganizationID: req.OrganizationID,
OrdererURL: req.OrdererURL,
@@ -1681,7 +1688,7 @@ func (h *Handler) FabricGetBlocks(w http.ResponseWriter, r *http.Request) {
reverse = reverseBool
}
- blocks, total, err := h.networkService.GetBlocks(r.Context(), networkID, limit, offset, reverse)
+ blocks, total, err := h.networkService.GetFabricBlocks(r.Context(), networkID, limit, offset, reverse)
if err != nil {
writeError(w, http.StatusInternalServerError, "get_blocks_failed", err.Error())
return
@@ -1718,7 +1725,7 @@ func (h *Handler) FabricGetBlock(w http.ResponseWriter, r *http.Request) {
return
}
- blck, err := h.networkService.GetBlockTransactions(r.Context(), networkID, blockNum)
+ blck, err := h.networkService.GetFabricBlock(r.Context(), networkID, blockNum)
if err != nil {
if err.Error() == "block not found" {
writeError(w, http.StatusNotFound, "block_not_found", "Block not found")
@@ -1758,7 +1765,7 @@ func (h *Handler) FabricGetTransaction(w http.ResponseWriter, r *http.Request) {
return
}
- blck, err := h.networkService.GetBlockByTransaction(r.Context(), networkID, txID)
+ blck, err := h.networkService.GetFabricBlockByTransaction(r.Context(), networkID, txID)
if err != nil {
if err.Error() == "transaction not found" {
writeError(w, http.StatusNotFound, "transaction_not_found", "Transaction not found")
@@ -1855,7 +1862,7 @@ func (h *Handler) GetChainInfo(w http.ResponseWriter, r *http.Request) {
}
// Get chain info from service layer
- chainInfo, err := h.networkService.GetChainInfo(r.Context(), networkID)
+ chainInfo, err := h.networkService.GetFabricChainInfo(r.Context(), networkID)
if err != nil {
writeError(w, http.StatusInternalServerError, "get_chain_info_failed", err.Error())
return
diff --git a/pkg/networks/service/besu.go b/pkg/networks/service/besu.go
new file mode 100644
index 0000000..f673e2b
--- /dev/null
+++ b/pkg/networks/service/besu.go
@@ -0,0 +1,32 @@
+package service
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chainlaunch/chainlaunch/pkg/networks/service/besu"
+)
+
+// importBesuNetwork imports an existing Besu network from a genesis file
+// using the Besu deployer and returns the new network's ID.
+func (s *NetworkService) importBesuNetwork(ctx context.Context, params ImportNetworkParams) (*ImportNetworkResult, error) {
+	d, err := s.deployerFactory.GetDeployer("besu")
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Besu deployer: %w", err)
+	}
+
+	besuDeployer, ok := d.(*besu.BesuDeployer)
+	if !ok {
+		return nil, fmt.Errorf("invalid deployer type")
+	}
+
+	id, err := besuDeployer.ImportNetwork(ctx, params.GenesisFile, params.Name, params.Description)
+	if err != nil {
+		return nil, fmt.Errorf("failed to import Besu network: %w", err)
+	}
+
+	result := &ImportNetworkResult{
+		NetworkID: id,
+		Message:   "Besu network imported successfully",
+	}
+	return result, nil
+}
diff --git a/pkg/networks/service/fabric.go b/pkg/networks/service/fabric.go
new file mode 100644
index 0000000..efdde82
--- /dev/null
+++ b/pkg/networks/service/fabric.go
@@ -0,0 +1,645 @@
+package service
+
+import (
+ "context"
+ "database/sql"
+ "encoding/base64"
+ "fmt"
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ "github.com/chainlaunch/chainlaunch/pkg/networks/service/fabric"
+ fabricblock "github.com/chainlaunch/chainlaunch/pkg/networks/service/fabric/block"
+ "github.com/chainlaunch/chainlaunch/pkg/networks/service/types"
+ nodetypes "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+ "github.com/sirupsen/logrus"
+)
+
+// AnchorPeer identifies a peer endpoint to be advertised as an anchor peer
+// for an organization on a Fabric channel.
+type AnchorPeer struct {
+	Host string `json:"host"` // peer hostname or IP address
+	Port int `json:"port"` // peer port
+}
+
+// UpdateOrganizationCRL updates the CRL for an organization in the network.
+//
+// It submits a channel config update for the organization via the Fabric
+// deployer, waits briefly, then refreshes the locally cached config block.
+// Returns the config update transaction ID.
+func (s *NetworkService) UpdateOrganizationCRL(ctx context.Context, networkID, organizationID int64) (string, error) {
+	// Get network details
+	network, err := s.db.GetNetwork(ctx, networkID)
+	if err != nil {
+		return "", fmt.Errorf("failed to get network: %w", err)
+	}
+
+	// Get deployer
+	deployer, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return "", fmt.Errorf("failed to get deployer: %w", err)
+	}
+
+	fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
+	if !ok {
+		return "", fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	// Update the CRL in the network
+	txID, err := fabricDeployer.UpdateOrganizationCRL(ctx, networkID, fabric.UpdateOrganizationCRLInput{
+		OrganizationID: organizationID,
+	})
+	if err != nil {
+		return "", fmt.Errorf("failed to update CRL: %w", err)
+	}
+
+	logrus.Info("Reloading network block after updating CRL, waiting 3 seconds")
+	// NOTE(review): fixed 3s delay, presumably to let the config update commit
+	// before the config block is re-fetched — confirm, or poll for the block.
+	time.Sleep(3 * time.Second)
+
+	// Reload network block; failure here is logged but does not fail the
+	// call, since the CRL update itself already succeeded.
+	if err := s.ReloadFabricNetworkBlock(ctx, networkID); err != nil {
+		logrus.Errorf("Failed to reload network block after updating CRL: %v", err)
+	}
+
+	return txID, nil
+}
+
+// UpdateFabricNetwork prepares a config update proposal for a Fabric network
+// and immediately submits it, signed by all locally managed organizations.
+//
+// NOTE(review): despite the "prepare" wording below, this method also applies
+// the update via UpdateChannelConfig before returning the proposal.
+func (s *NetworkService) UpdateFabricNetwork(ctx context.Context, networkID int64, operations []fabric.ConfigUpdateOperation) (*fabric.ConfigUpdateProposal, error) {
+	// Get deployer for the network
+	deployer, err := s.deployerFactory.GetDeployer(string(BlockchainTypeFabric))
+	if err != nil {
+		return nil, fmt.Errorf("failed to get deployer: %w", err)
+	}
+
+	// Assert that it's a Fabric deployer
+	fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
+	if !ok {
+		return nil, fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	// Prepare the config update
+	proposal, err := fabricDeployer.PrepareConfigUpdate(ctx, networkID, operations)
+	if err != nil {
+		return nil, fmt.Errorf("failed to prepare config update: %w", err)
+	}
+
+	// Get organizations managed by us that can sign the config update.
+	// NOTE(review): this lists ALL local Fabric orgs, not only the ones in
+	// this network — confirm that is intended.
+	orgs, err := s.db.ListFabricOrganizations(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get network organizations: %w", err)
+	}
+	var signingOrgIDs []string
+	for _, org := range orgs {
+		signingOrgIDs = append(signingOrgIDs, org.MspID)
+	}
+
+	// Resolve an orderer endpoint to submit the update to.
+	ordererAddress, ordererTLSCert, err := s.getOrdererAddressAndCertForNetwork(ctx, networkID, fabricDeployer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get orderer address and TLS certificate: %w", err)
+	}
+
+	res, err := fabricDeployer.UpdateChannelConfig(ctx, networkID, proposal.ConfigUpdateEnvelope, signingOrgIDs, ordererAddress, ordererTLSCert)
+	if err != nil {
+		return nil, fmt.Errorf("failed to update channel config: %w", err)
+	}
+	s.logger.Info("Channel config updated", "txID", res)
+	return proposal, nil
+}
+
+// getOrdererAddressAndCertForNetwork resolves an orderer endpoint and its
+// TLS CA certificate for the given network. It prefers an orderer that is
+// registered as a network node and falls back to the orderers advertised
+// in the current channel config block.
+func (s *NetworkService) getOrdererAddressAndCertForNetwork(ctx context.Context, networkID int64, fabricDeployer *fabric.FabricDeployer) (string, string, error) {
+	networkNodes, err := s.GetNetworkNodes(ctx, networkID)
+	if err != nil {
+		return "", "", fmt.Errorf("failed to get network nodes: %w", err)
+	}
+
+	var address, tlsCert string
+
+	// Prefer an orderer we manage ourselves.
+	for _, n := range networkNodes {
+		if n.Node.NodeType != nodetypes.NodeTypeFabricOrderer || n.Node.FabricOrderer == nil {
+			continue
+		}
+		address = n.Node.FabricOrderer.ExternalEndpoint
+		tlsCert = n.Node.FabricOrderer.TLSCACert
+		break
+	}
+
+	// Otherwise fall back to the current channel config.
+	if address == "" {
+		configBlock, err := fabricDeployer.GetCurrentChannelConfig(networkID)
+		if err != nil {
+			return "", "", fmt.Errorf("failed to get current config block: %w", err)
+		}
+
+		orderers, err := fabricDeployer.GetOrderersFromConfigBlock(ctx, configBlock)
+		if err != nil {
+			return "", "", fmt.Errorf("failed to get orderer info from config: %w", err)
+		}
+		if len(orderers) == 0 {
+			return "", "", fmt.Errorf("no orderer found in config block")
+		}
+		address = orderers[0].URL
+		tlsCert = orderers[0].TLSCert
+	}
+
+	if address == "" {
+		return "", "", fmt.Errorf("no orderer found in network or config block")
+	}
+
+	return address, tlsCert, nil
+}
+
+// GetFabricChainInfo returns the chain height and the current/previous
+// block hashes for the given Fabric network.
+func (s *NetworkService) GetFabricChainInfo(ctx context.Context, networkID int64) (*ChainInfo, error) {
+	deployer, err := s.getFabricDeployerForNetwork(ctx, networkID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get fabric deployer: %w", err)
+	}
+
+	info, err := deployer.GetChainInfo(ctx, networkID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get chain info: %w", err)
+	}
+
+	result := &ChainInfo{
+		Height:            info.Height,
+		CurrentBlockHash:  info.CurrentBlockHash,
+		PreviousBlockHash: info.PreviousBlockHash,
+	}
+	return result, nil
+}
+
+// GetFabricBlocks retrieves a paginated list of blocks from the network,
+// returning the requested page and the total block count.
+func (s *NetworkService) GetFabricBlocks(ctx context.Context, networkID int64, limit, offset int32, reverse bool) ([]fabricblock.Block, int64, error) {
+	deployer, err := s.getFabricDeployerForNetwork(ctx, networkID)
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to get fabric deployer: %w", err)
+	}
+
+	blocks, total, err := deployer.GetBlocks(ctx, networkID, limit, offset, reverse)
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to get blocks: %w", err)
+	}
+	return blocks, total, nil
+}
+
+// GetFabricBlock retrieves a specific block, including its transactions,
+// by block number.
+func (s *NetworkService) GetFabricBlock(ctx context.Context, networkID int64, blockNum uint64) (*fabricblock.Block, error) {
+	deployer, err := s.getFabricDeployerForNetwork(ctx, networkID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get fabric deployer: %w", err)
+	}
+
+	blk, err := deployer.GetBlock(ctx, networkID, blockNum)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get block transactions: %w", err)
+	}
+	return blk, nil
+}
+
+// GetFabricBlockByTransaction retrieves the block containing the
+// transaction with the given ID.
+func (s *NetworkService) GetFabricBlockByTransaction(ctx context.Context, networkID int64, txID string) (*fabricblock.Block, error) {
+	deployer, err := s.getFabricDeployerForNetwork(ctx, networkID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get fabric deployer: %w", err)
+	}
+
+	blk, err := deployer.GetBlockByTransaction(ctx, networkID, txID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get block: %w", err)
+	}
+	return blk, nil
+}
+
+// getFabricDeployerForNetwork looks up the network and returns its
+// deployer, asserting that the network is a Fabric network.
+func (s *NetworkService) getFabricDeployerForNetwork(ctx context.Context, networkID int64) (*fabric.FabricDeployer, error) {
+	// Verify the network exists and determine its platform.
+	network, err := s.db.GetNetwork(ctx, networkID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get network: %w", err)
+	}
+
+	deployer, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get deployer: %w", err)
+	}
+
+	if fabricDeployer, ok := deployer.(*fabric.FabricDeployer); ok {
+		return fabricDeployer, nil
+	}
+	return nil, fmt.Errorf("network %d is not a Fabric network", networkID)
+}
+
+// ImportNetworkWithOrgParams contains parameters for importing a network with organization details.
+// All fields are passed through to the Fabric deployer's ImportNetworkWithOrg.
+type ImportNetworkWithOrgParams struct {
+	ChannelID string // name of the Fabric channel to import
+	OrganizationID int64 // ID of the organization used for the import
+	OrdererURL string // orderer endpoint to fetch the channel config from
+	OrdererTLSCert []byte // TLS certificate for the orderer connection
+	Description string // human-readable description for the imported network
+}
+
+// ImportFabricNetworkWithOrg imports a Fabric network using organization
+// details (channel, org, orderer endpoint and TLS certificate).
+func (s *NetworkService) ImportFabricNetworkWithOrg(ctx context.Context, params ImportNetworkWithOrgParams) (*ImportNetworkResult, error) {
+	d, err := s.deployerFactory.GetDeployer("fabric")
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Fabric deployer: %w", err)
+	}
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return nil, fmt.Errorf("invalid deployer type")
+	}
+
+	id, err := fd.ImportNetworkWithOrg(ctx, params.ChannelID, params.OrganizationID, params.OrdererURL, params.OrdererTLSCert, params.Description)
+	if err != nil {
+		return nil, fmt.Errorf("failed to import Fabric network with org: %w", err)
+	}
+
+	result := &ImportNetworkResult{
+		NetworkID: id,
+		Message:   "Fabric network imported successfully with organization",
+	}
+	return result, nil
+}
+
+// importFabricNetwork imports an existing Fabric network from a genesis
+// file using the Fabric deployer and returns the new network's ID.
+func (s *NetworkService) importFabricNetwork(ctx context.Context, params ImportNetworkParams) (*ImportNetworkResult, error) {
+	d, err := s.deployerFactory.GetDeployer("fabric")
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Fabric deployer: %w", err)
+	}
+
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return nil, fmt.Errorf("invalid deployer type")
+	}
+
+	id, err := fd.ImportNetwork(ctx, params.GenesisFile, params.Description)
+	if err != nil {
+		return nil, fmt.Errorf("failed to import Fabric network: %w", err)
+	}
+
+	result := &ImportNetworkResult{
+		NetworkID: id,
+		Message:   "Fabric network imported successfully",
+	}
+	return result, nil
+}
+
+// SetAnchorPeers sets the anchor peers for an organization in a Fabric network
+func (s *NetworkService) SetAnchorPeers(ctx context.Context, networkID, organizationID int64, anchorPeers []AnchorPeer) (string, error) {
+ // Get network details
+ network, err := s.db.GetNetwork(ctx, networkID)
+ if err != nil {
+ return "", fmt.Errorf("failed to get network: %w", err)
+ }
+
+ // Get deployer
+ deployer, err := s.deployerFactory.GetDeployer(network.Platform)
+ if err != nil {
+ return "", fmt.Errorf("failed to get deployer: %w", err)
+ }
+
+ fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
+ if !ok {
+ return "", fmt.Errorf("network %d is not a Fabric network", networkID)
+ }
+
+ // Convert anchor peers to deployer format
+ deployerAnchorPeers := make([]types.HostPort, len(anchorPeers))
+ for i, ap := range anchorPeers {
+ deployerAnchorPeers[i] = types.HostPort{
+ Host: ap.Host,
+ Port: ap.Port,
+ }
+ }
+
+ // Try to get orderer info from network nodes first
+ networkNodes, err := s.GetNetworkNodes(ctx, networkID)
+ if err != nil {
+ return "", fmt.Errorf("failed to get network nodes: %w", err)
+ }
+
+ var ordererAddress, ordererTLSCert string
+
+ // Look for orderer in our registry
+ for _, node := range networkNodes {
+ if node.Node.NodeType == nodetypes.NodeTypeFabricOrderer {
+ if node.Node.FabricOrderer == nil {
+ continue
+ }
+ ordererAddress = node.Node.FabricOrderer.ExternalEndpoint
+ ordererTLSCert = node.Node.FabricOrderer.TLSCACert
+ break
+ }
+ }
+
+ // If no orderer found in registry, try to get from current config block
+ if ordererAddress == "" {
+ // Get current config block
+ configBlock, err := fabricDeployer.GetCurrentChannelConfig(networkID)
+ if err != nil {
+ return "", fmt.Errorf("failed to get current config block: %w", err)
+ }
+
+ // Extract orderer info from config block
+ ordererInfo, err := fabricDeployer.GetOrderersFromConfigBlock(ctx, configBlock)
+ if err != nil {
+ return "", fmt.Errorf("failed to get orderer info from config: %w", err)
+ }
+ if len(ordererInfo) == 0 {
+ return "", fmt.Errorf("no orderer found in config block")
+ }
+ ordererAddress = ordererInfo[0].URL
+ ordererTLSCert = ordererInfo[0].TLSCert
+ }
+
+ if ordererAddress == "" {
+ return "", fmt.Errorf("no orderer found in network or config block")
+ }
+
+ // Set anchor peers using deployer with the found orderer info
+ txID, err := fabricDeployer.SetAnchorPeersWithOrderer(ctx, networkID, organizationID, deployerAnchorPeers, ordererAddress, ordererTLSCert)
+ if err != nil {
+ return "", err
+ }
+
+ logrus.Info("Reloading network block after setting anchor peers, waiting 3 seconds")
+ time.Sleep(3 * time.Second)
+
+ // Reload network block
+ if err := s.ReloadFabricNetworkBlock(ctx, networkID); err != nil {
+ logrus.Errorf("Failed to reload network block after setting anchor peers: %v", err)
+ }
+
+ return txID, nil
+}
+
+// ReloadFabricNetworkBlock reloads the network block for a given network ID.
+//
+// It fetches the current channel config block from the network via the
+// Fabric deployer and stores it (base64-encoded) as the network's current
+// config block in the database.
+func (s *NetworkService) ReloadFabricNetworkBlock(ctx context.Context, networkID int64) error {
+	// Get the network
+	network, err := s.db.GetNetwork(ctx, networkID)
+	if err != nil {
+		// NOTE(review): direct comparison; errors.Is(err, sql.ErrNoRows)
+		// would also match wrapped errors — confirm the driver never wraps.
+		if err == sql.ErrNoRows {
+			return fmt.Errorf("network with id %d not found", networkID)
+		}
+		return fmt.Errorf("failed to get network: %w", err)
+	}
+
+	// Get the deployer for this network type
+	deployer, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return fmt.Errorf("failed to get deployer: %w", err)
+	}
+	fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
+	if !ok {
+		return fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	// Get the current config block (fetched live from the network)
+	configBlock, err := fabricDeployer.FetchCurrentChannelConfig(ctx, networkID)
+	if err != nil {
+		return fmt.Errorf("failed to get current config block: %w", err)
+	}
+	configBlockB64 := base64.StdEncoding.EncodeToString(configBlock)
+
+	// Persist the refreshed config block on the network row.
+	err = s.db.UpdateNetworkCurrentConfigBlock(ctx, &db.UpdateNetworkCurrentConfigBlockParams{
+		ID: networkID,
+		CurrentConfigBlockB64: sql.NullString{String: configBlockB64, Valid: true},
+	})
+	if err != nil {
+		return fmt.Errorf("failed to update network config block: %w", err)
+	}
+
+	return nil
+}
+
+// GetNetworkConfig retrieves the network configuration as YAML for the
+// given organization.
+func (s *NetworkService) GetNetworkConfig(ctx context.Context, networkID, orgID int64) (string, error) {
+	network, err := s.db.GetNetwork(ctx, networkID)
+	if err != nil {
+		return "", fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return "", fmt.Errorf("failed to get deployer: %w", err)
+	}
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return "", fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	yamlConfig, err := fd.GenerateNetworkConfig(ctx, networkID, orgID)
+	if err != nil {
+		return "", fmt.Errorf("failed to generate network config: %w", err)
+	}
+	return yamlConfig, nil
+}
+
+// UnjoinPeerFromNetwork removes a peer from a channel but keeps it in the
+// network.
+func (s *NetworkService) UnjoinPeerFromNetwork(networkID, peerID int64) error {
+	network, err := s.db.GetNetwork(context.Background(), networkID)
+	if err != nil {
+		return fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return fmt.Errorf("failed to get deployer: %w", err)
+	}
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	if err := fd.UnjoinNode(networkID, peerID); err != nil {
+		return fmt.Errorf("failed to unjoin peer: %w", err)
+	}
+	logrus.Infof("unjoined peer %d from network %d", peerID, networkID)
+	return nil
+}
+
+// UnjoinOrdererFromNetwork removes an orderer from a channel but keeps it
+// in the network.
+func (s *NetworkService) UnjoinOrdererFromNetwork(networkID, ordererID int64) error {
+	network, err := s.db.GetNetwork(context.Background(), networkID)
+	if err != nil {
+		return fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return fmt.Errorf("failed to get deployer: %w", err)
+	}
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	if err := fd.UnjoinNode(networkID, ordererID); err != nil {
+		return fmt.Errorf("failed to unjoin orderer: %w", err)
+	}
+	logrus.Infof("unjoined orderer %d from network %d", ordererID, networkID)
+	return nil
+}
+
+// JoinPeerToNetwork joins a peer to a Fabric network using the network's
+// stored genesis block.
+func (s *NetworkService) JoinPeerToNetwork(networkID, peerID int64) error {
+	network, err := s.db.GetNetwork(context.Background(), networkID)
+	if err != nil {
+		return fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return fmt.Errorf("failed to get deployer: %w", err)
+	}
+
+	// The peer needs the genesis block to join the channel.
+	if !network.GenesisBlockB64.Valid {
+		return fmt.Errorf("genesis block is not set for network %d", networkID)
+	}
+	genesis, err := base64.StdEncoding.DecodeString(network.GenesisBlockB64.String)
+	if err != nil {
+		return fmt.Errorf("failed to decode genesis block: %w", err)
+	}
+
+	if err := d.JoinNode(network.ID, genesis, peerID); err != nil {
+		return fmt.Errorf("failed to join node: %w", err)
+	}
+	logrus.Infof("joined peer %d to network %d", peerID, networkID)
+
+	return nil
+}
+
+// JoinOrdererToNetwork joins an orderer to a Fabric network using the
+// network's stored genesis block.
+func (s *NetworkService) JoinOrdererToNetwork(networkID, ordererID int64) error {
+	network, err := s.db.GetNetwork(context.Background(), networkID)
+	if err != nil {
+		return fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return fmt.Errorf("failed to get deployer: %w", err)
+	}
+
+	// The orderer needs the genesis block to join the channel.
+	if !network.GenesisBlockB64.Valid {
+		return fmt.Errorf("genesis block is not set for network %d", networkID)
+	}
+	genesis, err := base64.StdEncoding.DecodeString(network.GenesisBlockB64.String)
+	if err != nil {
+		return fmt.Errorf("failed to decode genesis block: %w", err)
+	}
+
+	if err := d.JoinNode(network.ID, genesis, ordererID); err != nil {
+		return fmt.Errorf("failed to join node: %w", err)
+	}
+	logrus.Infof("joined orderer %d to network %d", ordererID, networkID)
+
+	return nil
+}
+
+// RemovePeerFromNetwork removes a peer from a Fabric network.
+func (s *NetworkService) RemovePeerFromNetwork(networkID, peerID int64) error {
+	network, err := s.db.GetNetwork(context.Background(), networkID)
+	if err != nil {
+		return fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return fmt.Errorf("failed to get deployer: %w", err)
+	}
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	if err := fd.RemoveNode(networkID, peerID); err != nil {
+		return fmt.Errorf("failed to remove peer: %w", err)
+	}
+	logrus.Infof("removed peer %d from network %d", peerID, networkID)
+	return nil
+}
+
+// RemoveOrdererFromNetwork removes an orderer from a Fabric network.
+func (s *NetworkService) RemoveOrdererFromNetwork(networkID, ordererID int64) error {
+	network, err := s.db.GetNetwork(context.Background(), networkID)
+	if err != nil {
+		return fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return fmt.Errorf("failed to get deployer: %w", err)
+	}
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	if err := fd.RemoveNode(networkID, ordererID); err != nil {
+		return fmt.Errorf("failed to remove orderer: %w", err)
+	}
+	logrus.Infof("removed orderer %d from network %d", ordererID, networkID)
+	return nil
+}
+
+// GetFabricCurrentChannelConfig retrieves the current channel configuration
+// for a network as a generic map.
+func (s *NetworkService) GetFabricCurrentChannelConfig(networkID int64) (map[string]interface{}, error) {
+	network, err := s.db.GetNetwork(context.Background(), networkID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get deployer: %w", err)
+	}
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return nil, fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	return fd.GetCurrentChannelConfigAsMap(networkID)
+}
+
+// GetFabricChannelConfig retrieves the channel configuration for a network
+// as a generic map.
+func (s *NetworkService) GetFabricChannelConfig(networkID int64) (map[string]interface{}, error) {
+	network, err := s.db.GetNetwork(context.Background(), networkID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get network: %w", err)
+	}
+
+	d, err := s.deployerFactory.GetDeployer(network.Platform)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get deployer: %w", err)
+	}
+	fd, ok := d.(*fabric.FabricDeployer)
+	if !ok {
+		return nil, fmt.Errorf("network %d is not a Fabric network", networkID)
+	}
+
+	return fd.GetChannelConfig(networkID)
+}
diff --git a/pkg/networks/service/fabric/deployer.go b/pkg/networks/service/fabric/deployer.go
index 529a3c5..ed15d40 100644
--- a/pkg/networks/service/fabric/deployer.go
+++ b/pkg/networks/service/fabric/deployer.go
@@ -953,7 +953,6 @@ func (d *FabricDeployer) CreateGenesisBlock(networkID int64, config interface{})
}
peerNodes := []nodeservice.NodeResponse{}
for _, node := range orgNodes {
-
peerNodes = append(peerNodes, node)
listCreateNetworkNodes = append(listCreateNetworkNodes, &db.CreateNetworkNodeParams{
NetworkID: networkID,
@@ -1019,7 +1018,6 @@ func (d *FabricDeployer) CreateGenesisBlock(networkID int64, config interface{})
signCACert := *signKey.Certificate
tlsCACert := *tlsKey.Certificate
- // Get orderer nodes for this organization
ordererNodes := []*nodeservice.NodeResponse{}
for _, nodeID := range org.NodeIDs {
node, err := d.nodes.GetNode(ctx, nodeID)
@@ -2558,8 +2556,8 @@ func (d *FabricDeployer) MapBlock(blk *cb.Block) (*block.Block, error) {
return blockResponse, nil
}
-// GetBlockTransactions retrieves all transactions from a specific block
-func (d *FabricDeployer) GetBlockTransactions(ctx context.Context, networkID int64, blockNum uint64) (*block.Block, error) {
+// GetBlock retrieves all transactions from a specific block
+func (d *FabricDeployer) GetBlock(ctx context.Context, networkID int64, blockNum uint64) (*block.Block, error) {
// Get network details
network, err := d.db.GetNetwork(ctx, networkID)
if err != nil {
diff --git a/pkg/networks/service/service.go b/pkg/networks/service/service.go
index b6424f6..89502e2 100644
--- a/pkg/networks/service/service.go
+++ b/pkg/networks/service/service.go
@@ -12,15 +12,10 @@ import (
orgservicefabric "github.com/chainlaunch/chainlaunch/pkg/fabric/service"
keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service"
"github.com/chainlaunch/chainlaunch/pkg/logger"
- "github.com/chainlaunch/chainlaunch/pkg/networks/service/besu"
- "github.com/chainlaunch/chainlaunch/pkg/networks/service/fabric"
- "github.com/chainlaunch/chainlaunch/pkg/networks/service/fabric/block"
"github.com/chainlaunch/chainlaunch/pkg/networks/service/types"
nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service"
nodetypes "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
- nodeutils "github.com/chainlaunch/chainlaunch/pkg/nodes/utils"
"github.com/google/uuid"
- "github.com/sirupsen/logrus"
)
// BlockchainType represents the type of blockchain network
@@ -105,7 +100,7 @@ type ProposalSignature struct {
type NetworkService struct {
db *db.Queries
deployerFactory *DeployerFactory
- nodes *nodeservice.NodeService
+ nodeService *nodeservice.NodeService
keyMgmt *keymanagement.KeyManagementService
logger *logger.Logger
orgService *orgservicefabric.OrganizationService
@@ -116,7 +111,7 @@ func NewNetworkService(db *db.Queries, nodes *nodeservice.NodeService, keyMgmt *
return &NetworkService{
db: db,
deployerFactory: NewDeployerFactory(db, nodes, keyMgmt, orgService),
- nodes: nodes,
+ nodeService: nodes,
keyMgmt: keyMgmt,
logger: logger,
orgService: orgService,
@@ -210,152 +205,6 @@ func (s *NetworkService) CreateNetwork(ctx context.Context, name, description st
return s.mapDBNetworkToServiceNetwork(network), nil
}
-// JoinPeerToNetwork joins a peer to a Fabric network
-func (s *NetworkService) JoinPeerToNetwork(networkID, peerID int64) error {
- network, err := s.db.GetNetwork(context.Background(), networkID)
- if err != nil {
- return fmt.Errorf("failed to get network: %w", err)
- }
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return fmt.Errorf("failed to get deployer: %w", err)
- }
- if !network.GenesisBlockB64.Valid {
- return fmt.Errorf("genesis block is not set for network %d", networkID)
- }
- genesisBlockBytes, err := base64.StdEncoding.DecodeString(network.GenesisBlockB64.String)
- if err != nil {
- return fmt.Errorf("failed to decode genesis block: %w", err)
- }
- err = deployer.JoinNode(network.ID, genesisBlockBytes, peerID)
- if err != nil {
- return fmt.Errorf("failed to join node: %w", err)
- }
- logrus.Infof("joined peer %d to network %d", peerID, networkID)
-
- return nil
-}
-
-// JoinOrdererToNetwork joins an orderer to a Fabric network
-func (s *NetworkService) JoinOrdererToNetwork(networkID, ordererID int64) error {
- network, err := s.db.GetNetwork(context.Background(), networkID)
- if err != nil {
- return fmt.Errorf("failed to get network: %w", err)
- }
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return fmt.Errorf("failed to get deployer: %w", err)
- }
- if !network.GenesisBlockB64.Valid {
- return fmt.Errorf("genesis block is not set for network %d", networkID)
- }
- genesisBlockBytes, err := base64.StdEncoding.DecodeString(network.GenesisBlockB64.String)
- if err != nil {
- return fmt.Errorf("failed to decode genesis block: %w", err)
- }
- err = deployer.JoinNode(network.ID, genesisBlockBytes, ordererID)
- if err != nil {
- return fmt.Errorf("failed to join node: %w", err)
- }
- logrus.Infof("joined orderer %d to network %d", ordererID, networkID)
-
- return nil
-}
-
-// RemovePeerFromNetwork removes a peer from a Fabric network
-func (s *NetworkService) RemovePeerFromNetwork(networkID, peerID int64) error {
- // Get the appropriate deployer
- network, err := s.db.GetNetwork(context.Background(), networkID)
- if err != nil {
- return fmt.Errorf("failed to get network: %w", err)
- }
-
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- if err := fabricDeployer.RemoveNode(networkID, peerID); err != nil {
- return fmt.Errorf("failed to remove peer: %w", err)
- }
-
- logrus.Infof("removed peer %d from network %d", peerID, networkID)
- return nil
-}
-
-// RemoveOrdererFromNetwork removes an orderer from a Fabric network
-func (s *NetworkService) RemoveOrdererFromNetwork(networkID, ordererID int64) error {
- // Get the appropriate deployer
- network, err := s.db.GetNetwork(context.Background(), networkID)
- if err != nil {
- return fmt.Errorf("failed to get network: %w", err)
- }
-
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- if err := fabricDeployer.RemoveNode(networkID, ordererID); err != nil {
- return fmt.Errorf("failed to remove orderer: %w", err)
- }
-
- logrus.Infof("removed orderer %d from network %d", ordererID, networkID)
- return nil
-}
-
-// GetCurrentChannelConfig retrieves the current channel configuration for a network
-func (s *NetworkService) GetCurrentChannelConfig(networkID int64) (map[string]interface{}, error) {
- // Get the appropriate deployer
- network, err := s.db.GetNetwork(context.Background(), networkID)
- if err != nil {
- return nil, fmt.Errorf("failed to get network: %w", err)
- }
-
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return nil, fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return nil, fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- return fabricDeployer.GetCurrentChannelConfigAsMap(networkID)
-}
-
-// GetChannelConfig retrieves the channel configuration for a network
-func (s *NetworkService) GetChannelConfig(networkID int64) (map[string]interface{}, error) {
- // Get the appropriate deployer
- network, err := s.db.GetNetwork(context.Background(), networkID)
- if err != nil {
- return nil, fmt.Errorf("failed to get network: %w", err)
- }
-
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return nil, fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return nil, fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- return fabricDeployer.GetChannelConfig(networkID)
-}
-
// ListNetworks retrieves a list of networks with pagination
func (s *NetworkService) ListNetworks(ctx context.Context, params ListNetworksParams) (*ListNetworksResult, error) {
networks, err := s.db.ListNetworks(ctx)
@@ -387,22 +236,6 @@ func (s *NetworkService) GetNetwork(ctx context.Context, networkID int64) (*Netw
// DeleteNetwork deletes a network and all associated resources
func (s *NetworkService) DeleteNetwork(ctx context.Context, networkID int64) error {
- // Get network to determine platform
- // network, err := s.db.GetNetwork(ctx, networkID)
- // if err != nil {
- // return fmt.Errorf("failed to get network: %w", err)
- // }
-
- // Get the appropriate deployer
- // deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- // if err != nil {
- // return fmt.Errorf("failed to get deployer: %w", err)
- // }
-
- // Delete network resources using deployer
- // if err := deployer.DeleteNetwork(networkID); err != nil {
- // return fmt.Errorf("failed to delete network resources: %w", err)
- // }
// Delete network record
if err := s.db.DeleteNetwork(ctx, networkID); err != nil {
@@ -482,36 +315,11 @@ func (s *NetworkService) GetNetworkNodes(ctx context.Context, networkID int64) (
nodes := make([]NetworkNode, len(dbNodes))
for i, dbNode := range dbNodes {
- deploymentConfig, err := nodeutils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
- if err != nil {
- return nil, fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
- nodeConfig, err := nodeutils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+ node, err := s.nodeService.GetNode(ctx, dbNode.NodeID)
if err != nil {
- return nil, fmt.Errorf("failed to load node config: %w", err)
- }
- node := nodeservice.Node{
- ID: dbNode.NodeID,
- Name: dbNode.Name,
- BlockchainPlatform: nodetypes.BlockchainPlatform(dbNode.Platform),
- NodeType: nodetypes.NodeType(dbNode.NodeType.String),
- Status: nodetypes.NodeStatus(dbNode.Status_2),
- Endpoint: dbNode.Endpoint.String,
- PublicEndpoint: dbNode.PublicEndpoint.String,
- NodeConfig: nodeConfig,
- DeploymentConfig: deploymentConfig,
- CreatedAt: dbNode.CreatedAt_2,
- UpdatedAt: dbNode.UpdatedAt_2.Time,
- }
- if node.NodeType == nodetypes.NodeTypeFabricPeer {
- if peerConfig, ok := nodeConfig.(*nodetypes.FabricPeerConfig); ok {
- node.MSPID = peerConfig.MSPID
- }
- } else if node.NodeType == nodetypes.NodeTypeFabricOrderer {
- if ordererConfig, ok := nodeConfig.(*nodetypes.FabricOrdererConfig); ok {
- node.MSPID = ordererConfig.MSPID
- }
+ return nil, fmt.Errorf("failed to get node: %w", err)
}
+
nodes[i] = NetworkNode{
ID: dbNode.ID,
NetworkID: dbNode.NetworkID,
@@ -529,14 +337,14 @@ func (s *NetworkService) GetNetworkNodes(ctx context.Context, networkID int64) (
// NetworkNode represents a node in a network with its full details
type NetworkNode struct {
- ID int64 `json:"id"`
- NetworkID int64 `json:"networkId"`
- NodeID int64 `json:"nodeId"`
- Status string `json:"status"`
- Role string `json:"role"`
- CreatedAt time.Time `json:"createdAt"`
- UpdatedAt time.Time `json:"updatedAt"`
- Node nodeservice.Node `json:"node"`
+ ID int64 `json:"id"`
+ NetworkID int64 `json:"networkId"`
+ NodeID int64 `json:"nodeId"`
+ Status string `json:"status"`
+ Role string `json:"role"`
+ CreatedAt time.Time `json:"createdAt"`
+ UpdatedAt time.Time `json:"updatedAt"`
+ Node *nodeservice.NodeResponse `json:"node"`
}
// AddNodeToNetwork adds a node to the network with the specified role
@@ -548,7 +356,7 @@ func (s *NetworkService) AddNodeToNetwork(ctx context.Context, networkID, nodeID
}
// Get the node
- node, err := s.nodes.GetNode(ctx, nodeID)
+ node, err := s.nodeService.GetNode(ctx, nodeID)
if err != nil {
return fmt.Errorf("failed to get node: %w", err)
}
@@ -598,216 +406,6 @@ func (s *NetworkService) AddNodeToNetwork(ctx context.Context, networkID, nodeID
return nil
}
-// UnjoinPeerFromNetwork removes a peer from a channel but keeps it in the network
-func (s *NetworkService) UnjoinPeerFromNetwork(networkID, peerID int64) error {
- network, err := s.db.GetNetwork(context.Background(), networkID)
- if err != nil {
- return fmt.Errorf("failed to get network: %w", err)
- }
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- if err := fabricDeployer.UnjoinNode(networkID, peerID); err != nil {
- return fmt.Errorf("failed to unjoin peer: %w", err)
- }
-
- logrus.Infof("unjoined peer %d from network %d", peerID, networkID)
- return nil
-}
-
-// UnjoinOrdererFromNetwork removes an orderer from a channel but keeps it in the network
-func (s *NetworkService) UnjoinOrdererFromNetwork(networkID, ordererID int64) error {
- network, err := s.db.GetNetwork(context.Background(), networkID)
- if err != nil {
- return fmt.Errorf("failed to get network: %w", err)
- }
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- if err := fabricDeployer.UnjoinNode(networkID, ordererID); err != nil {
- return fmt.Errorf("failed to unjoin orderer: %w", err)
- }
-
- logrus.Infof("unjoined orderer %d from network %d", ordererID, networkID)
- return nil
-}
-
-type AnchorPeer struct {
- Host string `json:"host"`
- Port int `json:"port"`
-}
-
-// SetAnchorPeers sets the anchor peers for an organization in a Fabric network
-func (s *NetworkService) SetAnchorPeers(ctx context.Context, networkID, organizationID int64, anchorPeers []AnchorPeer) (string, error) {
- // Get network details
- network, err := s.db.GetNetwork(ctx, networkID)
- if err != nil {
- return "", fmt.Errorf("failed to get network: %w", err)
- }
-
- // Get deployer
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return "", fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return "", fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- // Convert anchor peers to deployer format
- deployerAnchorPeers := make([]types.HostPort, len(anchorPeers))
- for i, ap := range anchorPeers {
- deployerAnchorPeers[i] = types.HostPort{
- Host: ap.Host,
- Port: ap.Port,
- }
- }
-
- // Try to get orderer info from network nodes first
- networkNodes, err := s.GetNetworkNodes(ctx, networkID)
- if err != nil {
- return "", fmt.Errorf("failed to get network nodes: %w", err)
- }
-
- var ordererAddress, ordererTLSCert string
-
- // Look for orderer in our registry
- for _, node := range networkNodes {
- if node.Node.NodeType == nodetypes.NodeTypeFabricOrderer {
- ordererConfig, ok := node.Node.DeploymentConfig.(*nodetypes.FabricOrdererDeploymentConfig)
- if !ok {
- continue
- }
- ordererAddress = ordererConfig.ExternalEndpoint
- ordererTLSCert = ordererConfig.TLSCACert
- break
- }
- }
-
- // If no orderer found in registry, try to get from current config block
- if ordererAddress == "" {
- // Get current config block
- configBlock, err := fabricDeployer.GetCurrentChannelConfig(networkID)
- if err != nil {
- return "", fmt.Errorf("failed to get current config block: %w", err)
- }
-
- // Extract orderer info from config block
- ordererInfo, err := fabricDeployer.GetOrderersFromConfigBlock(ctx, configBlock)
- if err != nil {
- return "", fmt.Errorf("failed to get orderer info from config: %w", err)
- }
- if len(ordererInfo) == 0 {
- return "", fmt.Errorf("no orderer found in config block")
- }
- ordererAddress = ordererInfo[0].URL
- ordererTLSCert = ordererInfo[0].TLSCert
- }
-
- if ordererAddress == "" {
- return "", fmt.Errorf("no orderer found in network or config block")
- }
-
- // Set anchor peers using deployer with the found orderer info
- txID, err := fabricDeployer.SetAnchorPeersWithOrderer(ctx, networkID, organizationID, deployerAnchorPeers, ordererAddress, ordererTLSCert)
- if err != nil {
- return "", err
- }
-
- logrus.Info("Reloading network block after setting anchor peers, waiting 3 seconds")
- time.Sleep(3 * time.Second)
-
- // Reload network block
- if err := s.ReloadNetworkBlock(ctx, networkID); err != nil {
- logrus.Errorf("Failed to reload network block after setting anchor peers: %v", err)
- }
-
- return txID, nil
-}
-
-// ReloadNetworkBlock reloads the network block for a given network ID
-func (s *NetworkService) ReloadNetworkBlock(ctx context.Context, networkID int64) error {
- // Get the network
- network, err := s.db.GetNetwork(ctx, networkID)
- if err != nil {
- if err == sql.ErrNoRows {
- return fmt.Errorf("network with id %d not found", networkID)
- }
- return fmt.Errorf("failed to get network: %w", err)
- }
-
- // Get the deployer for this network type
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return fmt.Errorf("failed to get deployer: %w", err)
- }
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- // Get the current config block
- configBlock, err := fabricDeployer.FetchCurrentChannelConfig(ctx, networkID)
- if err != nil {
- return fmt.Errorf("failed to get current config block: %w", err)
- }
- configBlockB64 := base64.StdEncoding.EncodeToString(configBlock)
-
- err = s.db.UpdateNetworkCurrentConfigBlock(ctx, &db.UpdateNetworkCurrentConfigBlockParams{
- ID: networkID,
- CurrentConfigBlockB64: sql.NullString{String: configBlockB64, Valid: true},
- })
- if err != nil {
- return fmt.Errorf("failed to update network config block: %w", err)
- }
-
- return nil
-}
-
-// GetNetworkConfig retrieves the network configuration as YAML
-func (s *NetworkService) GetNetworkConfig(ctx context.Context, networkID, orgID int64) (string, error) {
- // Get the network
- network, err := s.db.GetNetwork(ctx, networkID)
- if err != nil {
- return "", fmt.Errorf("failed to get network: %w", err)
- }
-
- // Get the deployer
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return "", fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return "", fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- // Generate network config YAML
- configYAML, err := fabricDeployer.GenerateNetworkConfig(ctx, networkID, orgID)
- if err != nil {
- return "", fmt.Errorf("failed to generate network config: %w", err)
- }
-
- return configYAML, nil
-}
-
// GetGenesisBlock retrieves the genesis block for a network
func (s *NetworkService) GetGenesisBlock(ctx context.Context, networkID int64) ([]byte, error) {
network, err := s.db.GetNetwork(ctx, networkID)
@@ -836,302 +434,3 @@ func (s *NetworkService) ImportNetwork(ctx context.Context, params ImportNetwork
return nil, fmt.Errorf("unsupported network type: %s", params.NetworkType)
}
}
-
-// ImportNetworkWithOrgParams contains parameters for importing a network with organization details
-type ImportNetworkWithOrgParams struct {
- ChannelID string
- OrganizationID int64
- OrdererURL string
- OrdererTLSCert []byte
- Description string
-}
-
-// ImportNetworkWithOrg imports a Fabric network using organization details
-func (s *NetworkService) ImportNetworkWithOrg(ctx context.Context, params ImportNetworkWithOrgParams) (*ImportNetworkResult, error) {
- // Get the Fabric deployer
- deployer, err := s.deployerFactory.GetDeployer("fabric")
- if err != nil {
- return nil, fmt.Errorf("failed to get Fabric deployer: %w", err)
- }
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return nil, fmt.Errorf("invalid deployer type")
- }
-
- // Import the network using the Fabric deployer
- networkID, err := fabricDeployer.ImportNetworkWithOrg(ctx, params.ChannelID, params.OrganizationID, params.OrdererURL, params.OrdererTLSCert, params.Description)
- if err != nil {
- return nil, fmt.Errorf("failed to import Fabric network with org: %w", err)
- }
-
- return &ImportNetworkResult{
- NetworkID: networkID,
- Message: "Fabric network imported successfully with organization",
- }, nil
-}
-
-func (s *NetworkService) importFabricNetwork(ctx context.Context, params ImportNetworkParams) (*ImportNetworkResult, error) {
- // Get the Fabric deployer
- deployer, err := s.deployerFactory.GetDeployer("fabric")
- if err != nil {
- return nil, fmt.Errorf("failed to get Fabric deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return nil, fmt.Errorf("invalid deployer type")
- }
-
- // Import the network using the Fabric deployer
- networkID, err := fabricDeployer.ImportNetwork(ctx, params.GenesisFile, params.Description)
- if err != nil {
- return nil, fmt.Errorf("failed to import Fabric network: %w", err)
- }
-
- return &ImportNetworkResult{
- NetworkID: networkID,
- Message: "Fabric network imported successfully",
- }, nil
-}
-
-func (s *NetworkService) importBesuNetwork(ctx context.Context, params ImportNetworkParams) (*ImportNetworkResult, error) {
- // Get the Besu deployer
- deployer, err := s.deployerFactory.GetDeployer("besu")
- if err != nil {
- return nil, fmt.Errorf("failed to get Besu deployer: %w", err)
- }
-
- besuDeployer, ok := deployer.(*besu.BesuDeployer)
- if !ok {
- return nil, fmt.Errorf("invalid deployer type")
- }
-
- // Import the network using the Besu deployer
- networkID, err := besuDeployer.ImportNetwork(ctx, params.GenesisFile, params.Name, params.Description)
- if err != nil {
- return nil, fmt.Errorf("failed to import Besu network: %w", err)
- }
-
- return &ImportNetworkResult{
- NetworkID: networkID,
- Message: "Besu network imported successfully",
- }, nil
-}
-
-// UpdateFabricNetwork prepares a config update proposal for a Fabric network
-func (s *NetworkService) UpdateFabricNetwork(ctx context.Context, networkID int64, operations []fabric.ConfigUpdateOperation) (*fabric.ConfigUpdateProposal, error) {
- // Get deployer for the network
- deployer, err := s.deployerFactory.GetDeployer(string(BlockchainTypeFabric))
- if err != nil {
- return nil, fmt.Errorf("failed to get deployer: %w", err)
- }
-
- // Assert that it's a Fabric deployer
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return nil, fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- // Prepare the config update
- proposal, err := fabricDeployer.PrepareConfigUpdate(ctx, networkID, operations)
- if err != nil {
- return nil, fmt.Errorf("failed to prepare config update: %w", err)
- }
-
- // Get organizations managed by us that can sign the config update
- orgs, err := s.db.ListFabricOrganizations(ctx)
- if err != nil {
- return nil, fmt.Errorf("failed to get network organizations: %w", err)
- }
- var signingOrgIDs []string
- for _, org := range orgs {
- signingOrgIDs = append(signingOrgIDs, org.MspID)
- }
-
- ordererAddress, ordererTLSCert, err := s.getOrdererAddressAndCertForNetwork(ctx, networkID, fabricDeployer)
- if err != nil {
- return nil, fmt.Errorf("failed to get orderer address and TLS certificate: %w", err)
- }
-
- res, err := fabricDeployer.UpdateChannelConfig(ctx, networkID, proposal.ConfigUpdateEnvelope, signingOrgIDs, ordererAddress, ordererTLSCert)
- if err != nil {
- return nil, fmt.Errorf("failed to update channel config: %w", err)
- }
- s.logger.Info("Channel config updated", "txID", res)
- return proposal, nil
-}
-
-func (s *NetworkService) getOrdererAddressAndCertForNetwork(ctx context.Context, networkID int64, fabricDeployer *fabric.FabricDeployer) (string, string, error) {
-
- // Try to get orderer info from network nodes first
- networkNodes, err := s.GetNetworkNodes(ctx, networkID)
- if err != nil {
- return "", "", fmt.Errorf("failed to get network nodes: %w", err)
- }
-
- var ordererAddress, ordererTLSCert string
-
- // Look for orderer in our registry
- for _, node := range networkNodes {
- if node.Node.NodeType == nodetypes.NodeTypeFabricOrderer {
- ordererConfig, ok := node.Node.DeploymentConfig.(*nodetypes.FabricOrdererDeploymentConfig)
- if !ok {
- continue
- }
- ordererAddress = ordererConfig.ExternalEndpoint
- ordererTLSCert = ordererConfig.TLSCACert
- break
- }
- }
-
- // If no orderer found in registry, try to get from current config block
- if ordererAddress == "" {
- // Get current config block
- configBlock, err := fabricDeployer.GetCurrentChannelConfig(networkID)
- if err != nil {
- return "", "", fmt.Errorf("failed to get current config block: %w", err)
- }
-
- // Extract orderer info from config block
- ordererInfo, err := fabricDeployer.GetOrderersFromConfigBlock(ctx, configBlock)
- if err != nil {
- return "", "", fmt.Errorf("failed to get orderer info from config: %w", err)
- }
- if len(ordererInfo) == 0 {
- return "", "", fmt.Errorf("no orderer found in config block")
- }
- ordererAddress = ordererInfo[0].URL
- ordererTLSCert = ordererInfo[0].TLSCert
- }
-
- if ordererAddress == "" {
- return "", "", fmt.Errorf("no orderer found in network or config block")
- }
-
- return ordererAddress, ordererTLSCert, nil
-}
-
-func (s *NetworkService) GetChainInfo(ctx context.Context, networkID int64) (*ChainInfo, error) {
- fabricDeployer, err := s.getFabricDeployerForNetwork(ctx, networkID)
- if err != nil {
- return nil, fmt.Errorf("failed to get fabric deployer: %w", err)
- }
- chainInfo, err := fabricDeployer.GetChainInfo(ctx, networkID)
- if err != nil {
- return nil, fmt.Errorf("failed to get chain info: %w", err)
- }
- return &ChainInfo{
- Height: chainInfo.Height,
- CurrentBlockHash: chainInfo.CurrentBlockHash,
- PreviousBlockHash: chainInfo.PreviousBlockHash,
- }, nil
-}
-
-// GetBlocks retrieves a paginated list of blocks from the network
-func (s *NetworkService) GetBlocks(ctx context.Context, networkID int64, limit, offset int32, reverse bool) ([]block.Block, int64, error) {
- // Get the fabric deployer for this network
- fabricDeployer, err := s.getFabricDeployerForNetwork(ctx, networkID)
- if err != nil {
- return nil, 0, fmt.Errorf("failed to get fabric deployer: %w", err)
- }
-
- // Use the fabric deployer to get blocks
- fabricBlocks, total, err := fabricDeployer.GetBlocks(ctx, networkID, limit, offset, reverse)
- if err != nil {
- return nil, 0, fmt.Errorf("failed to get blocks: %w", err)
- }
-
- return fabricBlocks, total, nil
-}
-
-// GetBlockTransactions retrieves all transactions from a specific block
-func (s *NetworkService) GetBlockTransactions(ctx context.Context, networkID int64, blockNum uint64) (*block.Block, error) {
- // Get the fabric deployer for this network
- fabricDeployer, err := s.getFabricDeployerForNetwork(ctx, networkID)
- if err != nil {
- return nil, fmt.Errorf("failed to get fabric deployer: %w", err)
- }
-
- // Use the fabric deployer to get block transactions
- fabricTransactions, err := fabricDeployer.GetBlockTransactions(ctx, networkID, blockNum)
- if err != nil {
- return nil, fmt.Errorf("failed to get block transactions: %w", err)
- }
-
- return fabricTransactions, nil
-}
-
-// GetTransaction retrieves a specific transaction by its ID
-func (s *NetworkService) GetBlockByTransaction(ctx context.Context, networkID int64, txID string) (*block.Block, error) {
- // Get the fabric deployer for this network
- fabricDeployer, err := s.getFabricDeployerForNetwork(ctx, networkID)
- if err != nil {
- return nil, fmt.Errorf("failed to get fabric deployer: %w", err)
- }
-
- // Use the fabric deployer to get transaction
- block, err := fabricDeployer.GetBlockByTransaction(ctx, networkID, txID)
- if err != nil {
- return nil, fmt.Errorf("failed to get block: %w", err)
- }
-
- return block, nil
-}
-
-// getFabricDeployerForNetwork creates and returns a fabric deployer for the specified network
-func (s *NetworkService) getFabricDeployerForNetwork(ctx context.Context, networkID int64) (*fabric.FabricDeployer, error) {
- // Get network details to verify it exists and is a Fabric network
- network, err := s.db.GetNetwork(ctx, networkID)
- if err != nil {
- return nil, fmt.Errorf("failed to get network: %w", err)
- }
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return nil, fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return nil, fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- return fabricDeployer, nil
-}
-
-// UpdateOrganizationCRL updates the CRL for an organization in the network
-func (s *NetworkService) UpdateOrganizationCRL(ctx context.Context, networkID, organizationID int64) (string, error) {
- // Get network details
- network, err := s.db.GetNetwork(ctx, networkID)
- if err != nil {
- return "", fmt.Errorf("failed to get network: %w", err)
- }
-
- // Get deployer
- deployer, err := s.deployerFactory.GetDeployer(network.Platform)
- if err != nil {
- return "", fmt.Errorf("failed to get deployer: %w", err)
- }
-
- fabricDeployer, ok := deployer.(*fabric.FabricDeployer)
- if !ok {
- return "", fmt.Errorf("network %d is not a Fabric network", networkID)
- }
-
- // Update the CRL in the network
- txID, err := fabricDeployer.UpdateOrganizationCRL(ctx, networkID, fabric.UpdateOrganizationCRLInput{
- OrganizationID: organizationID,
- })
- if err != nil {
- return "", fmt.Errorf("failed to update CRL: %w", err)
- }
-
- logrus.Info("Reloading network block after updating CRL, waiting 3 seconds")
- time.Sleep(3 * time.Second)
-
- // Reload network block
- if err := s.ReloadNetworkBlock(ctx, networkID); err != nil {
- logrus.Errorf("Failed to reload network block after updating CRL: %v", err)
- }
-
- return txID, nil
-}
diff --git a/pkg/networks/service/types/types.go b/pkg/networks/service/types/types.go
index d385277..396c24e 100644
--- a/pkg/networks/service/types/types.go
+++ b/pkg/networks/service/types/types.go
@@ -102,6 +102,11 @@ type BesuNetworkConfig struct {
MixHash string `json:"mixHash"`
Coinbase string `json:"coinbase"`
Alloc map[string]AccountBalance `json:"alloc,omitempty"`
+ // Metrics configuration
+ MetricsEnabled bool `json:"metricsEnabled"`
+ MetricsHost string `json:"metricsHost"`
+ MetricsPort int `json:"metricsPort"`
+ MetricsProtocol string `json:"metricsProtocol"`
}
// UnmarshalNetworkConfig unmarshals network configuration based on its type
diff --git a/pkg/nodes/besu/besu.go b/pkg/nodes/besu/besu.go
index 611500c..0c61e90 100644
--- a/pkg/nodes/besu/besu.go
+++ b/pkg/nodes/besu/besu.go
@@ -4,6 +4,7 @@ import (
"bufio"
"context"
"fmt"
+ "io"
"os"
"os/exec"
"path/filepath"
@@ -14,6 +15,8 @@ import (
"github.com/chainlaunch/chainlaunch/pkg/logger"
"github.com/chainlaunch/chainlaunch/pkg/networks/service/types"
settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/client"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
)
@@ -88,12 +91,13 @@ func (b *LocalBesu) Start() (interface{}, error) {
// Build command and environment
cmd := b.buildCommand(dataDir, genesisPath, configDir)
- env := b.buildEnvironment()
switch b.mode {
case "service":
+ env := b.buildEnvironment()
return b.startService(cmd, env, dirPath, configDir)
case "docker":
+ env := b.buildDockerEnvironment()
return b.startDocker(env, dataDir, configDir)
default:
return nil, fmt.Errorf("invalid mode: %s", b.mode)
@@ -134,9 +138,20 @@ func (b *LocalBesu) checkPrerequisites() error {
}
case "docker":
- // Check Docker installation
- if err := exec.Command("docker", "--version").Run(); err != nil {
- return fmt.Errorf("Docker is not installed: %w", err)
+ // Check Docker installation using Docker API client
+ cli, err := client.NewClientWithOpts(
+ client.FromEnv,
+ client.WithAPIVersionNegotiation(),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to create Docker client: %w", err)
+ }
+ defer cli.Close()
+
+ // Ping Docker daemon to verify connectivity
+ ctx := context.Background()
+ if _, err := cli.Ping(ctx); err != nil {
+ return fmt.Errorf("Docker daemon is not running or not accessible: %w", err)
}
}
@@ -171,6 +186,10 @@ func (b *LocalBesu) buildCommand(dataDir string, genesisPath string, configDir s
fmt.Sprintf("--network-id=%d", b.opts.ChainID),
"--host-allowlist=*",
fmt.Sprintf("--node-private-key-file=%s", keyPath),
+ fmt.Sprintf("--metrics-enabled=%t", b.opts.MetricsEnabled),
+ "--metrics-host=0.0.0.0",
+ fmt.Sprintf("--metrics-port=%d", b.opts.MetricsPort),
+ fmt.Sprintf("--metrics-protocol=%s", b.opts.MetricsProtocol),
"--p2p-enabled=true",
fmt.Sprintf("--p2p-host=%s", b.opts.P2PHost),
@@ -213,6 +232,21 @@ func (b *LocalBesu) buildEnvironment() map[string]string {
return env
}
+// buildDockerEnvironment builds the environment variables for Besu in Docker
+func (b *LocalBesu) buildDockerEnvironment() map[string]string {
+ env := make(map[string]string)
+
+ // Add custom environment variables from opts
+ for k, v := range b.opts.Env {
+ env[k] = v
+ }
+
+ // Add Java options
+ env["JAVA_OPTS"] = "-Xmx4g"
+
+ return env
+}
+
// Stop stops the Besu node
func (b *LocalBesu) Stop() error {
b.logger.Info("Stopping Besu node", "opts", b.opts)
@@ -399,31 +433,81 @@ func (b *LocalBesu) installBesuMacOS() error {
return nil
}
-func (b *LocalBesu) getLogPath() string {
- return b.GetStdOutPath()
-}
-
// TailLogs tails the logs of the besu service
func (b *LocalBesu) TailLogs(ctx context.Context, tail int, follow bool) (<-chan string, error) {
logChan := make(chan string, 100)
+ if b.mode == "docker" {
+ slugifiedID := strings.ReplaceAll(strings.ToLower(b.opts.ID), " ", "-")
+		containerName := fmt.Sprintf("besu-%s", slugifiedID) // must stay in sync with the name used by startDocker/stopDocker
+ go func() {
+ defer close(logChan)
+ cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+ if err != nil {
+ b.logger.Error("Failed to create docker client", "error", err)
+ return
+ }
+ defer cli.Close()
+
+ options := container.LogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Follow: follow,
+ Details: true,
+ Tail: fmt.Sprintf("%d", tail),
+ }
+ reader, err := cli.ContainerLogs(ctx, containerName, options)
+ if err != nil {
+ b.logger.Error("Failed to get docker logs", "error", err)
+ return
+ }
+ defer reader.Close()
+
+ header := make([]byte, 8)
+ for {
+ _, err := io.ReadFull(reader, header)
+ if err != nil {
+ if err != io.EOF {
+ b.logger.Error("Failed to read docker log header", "error", err)
+ }
+ return
+ }
+ length := int(uint32(header[4])<<24 | uint32(header[5])<<16 | uint32(header[6])<<8 | uint32(header[7]))
+ if length == 0 {
+ continue
+ }
+ payload := make([]byte, length)
+ _, err = io.ReadFull(reader, payload)
+ if err != nil {
+ if err != io.EOF {
+ b.logger.Error("Failed to read docker log payload", "error", err)
+ }
+ return
+ }
+ select {
+ case <-ctx.Done():
+ return
+ case logChan <- string(payload):
+ }
+ }
+ }()
+ return logChan, nil
+ }
+
// Get log file path based on ID
slugifiedID := strings.ReplaceAll(strings.ToLower(b.opts.ID), " ", "-")
logPath := filepath.Join(b.configService.GetDataPath(), "besu", slugifiedID, b.getServiceName()+".log")
- // Check if log file exists
if _, err := os.Stat(logPath); os.IsNotExist(err) {
close(logChan)
return logChan, fmt.Errorf("log file does not exist: %s", logPath)
}
- // Start goroutine to tail logs
go func() {
defer close(logChan)
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
- // For Windows, use PowerShell Get-Content with UTF-8 encoding
if follow {
cmd = exec.Command("powershell", "-NoProfile", "-NonInteractive", "-Command",
"Get-Content", "-Encoding", "UTF8", "-Path", logPath, "-Tail", fmt.Sprintf("%d", tail), "-Wait")
@@ -432,7 +516,6 @@ func (b *LocalBesu) TailLogs(ctx context.Context, tail int, follow bool) (<-chan
"Get-Content", "-Encoding", "UTF8", "-Path", logPath, "-Tail", fmt.Sprintf("%d", tail))
}
} else {
- // For Unix-like systems, use tail command with LC_ALL=en_US.UTF-8
env := os.Environ()
env = append(env, "LC_ALL=en_US.UTF-8")
if follow {
@@ -440,41 +523,35 @@ func (b *LocalBesu) TailLogs(ctx context.Context, tail int, follow bool) (<-chan
} else {
cmd = exec.Command("tail", "-n", fmt.Sprintf("%d", tail), logPath)
}
+ cmd.Env = env
}
- // Create pipe for reading command output
stdout, err := cmd.StdoutPipe()
if err != nil {
b.logger.Error("Failed to create stdout pipe", "error", err)
return
}
- // Start the command
if err := cmd.Start(); err != nil {
b.logger.Error("Failed to start tail command", "error", err)
return
}
- // Create UTF-8 aware scanner to read output line by line
scanner := bufio.NewScanner(transform.NewReader(stdout, unicode.UTF8.NewDecoder()))
scanner.Split(bufio.ScanLines)
- scanner.Buffer(make([]byte, 64*1024), 1024*1024) // Increase buffer size for long lines
+ scanner.Buffer(make([]byte, 64*1024), 1024*1024)
- // Read lines and send to channel
for scanner.Scan() {
select {
case <-ctx.Done():
- // Context cancelled, stop tailing
cmd.Process.Kill()
return
- case logChan <- scanner.Text():
- // Line sent successfully
+ case logChan <- scanner.Text() + "\n":
}
}
- // Wait for command to complete
if err := cmd.Wait(); err != nil {
- if ctx.Err() == nil { // Only log error if context wasn't cancelled
+ if ctx.Err() == nil {
b.logger.Error("Tail command failed", "error", err)
}
}
diff --git a/pkg/nodes/besu/docker.go b/pkg/nodes/besu/docker.go
index 9491dbb..a0a0878 100644
--- a/pkg/nodes/besu/docker.go
+++ b/pkg/nodes/besu/docker.go
@@ -3,6 +3,7 @@ package besu
import (
"context"
"fmt"
+ "io"
"os"
"path/filepath"
"strings"
@@ -44,7 +45,10 @@ func (b *LocalBesu) createVolume(ctx context.Context, cli *client.Client, name s
// startDocker starts the besu node in a docker container
func (b *LocalBesu) startDocker(env map[string]string, dataDir, configDir string) (*StartDockerResponse, error) {
ctx := context.Background()
- cli, err := client.NewClientWithOpts(client.FromEnv)
+ cli, err := client.NewClientWithOpts(
+ client.FromEnv,
+ client.WithAPIVersionNegotiation(),
+ )
if err != nil {
return nil, fmt.Errorf("failed to create docker client: %w", err)
}
@@ -64,7 +68,18 @@ func (b *LocalBesu) startDocker(env map[string]string, dataDir, configDir string
// Prepare container configuration
containerName := b.getContainerName()
imageName := fmt.Sprintf("hyperledger/besu:%s", b.opts.Version)
+ // Pull the image
+ reader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to pull image %s: %w", imageName, err)
+ }
+ defer reader.Close()
+ // Wait for the pull to complete
+ _, err = io.Copy(io.Discard, reader)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read image pull response: %w", err)
+ }
// Create port bindings
portBindings := nat.PortMap{
nat.Port(fmt.Sprintf("%s/tcp", b.opts.RPCPort)): []nat.PortBinding{
@@ -76,12 +91,15 @@ func (b *LocalBesu) startDocker(env map[string]string, dataDir, configDir string
nat.Port(fmt.Sprintf("%s/udp", b.opts.P2PPort)): []nat.PortBinding{
{HostIP: "0.0.0.0", HostPort: b.opts.P2PPort},
},
+ nat.Port(fmt.Sprintf("%d/tcp", b.opts.MetricsPort)): []nat.PortBinding{
+ {HostIP: "0.0.0.0", HostPort: fmt.Sprintf("%d", b.opts.MetricsPort)},
+ },
}
// Create container config
config := &container.Config{
Image: imageName,
- Cmd: b.buildDockerBesuCommand("/opt/besu/data", "/opt/besu/config"),
+ Cmd: b.buildDockerBesuArgs("/opt/besu/data", "/opt/besu/config"),
Env: formatEnvForDocker(env),
ExposedPorts: nat.PortSet{},
}
@@ -143,7 +161,10 @@ func (b *LocalBesu) startDocker(env map[string]string, dataDir, configDir string
// stopDocker stops the besu docker container
func (b *LocalBesu) stopDocker() error {
ctx := context.Background()
- cli, err := client.NewClientWithOpts(client.FromEnv)
+ cli, err := client.NewClientWithOpts(
+ client.FromEnv,
+ client.WithAPIVersionNegotiation(),
+ )
if err != nil {
return fmt.Errorf("failed to create docker client: %w", err)
}
@@ -190,28 +211,29 @@ func formatEnvForDocker(env map[string]string) []string {
}
-// buildBesuCommand builds the command arguments for Besu
+// buildDockerBesuArgs builds the Besu CLI arguments used when running in docker
-func (b *LocalBesu) buildDockerBesuCommand(dataPath, configPath string) []string {
+func (b *LocalBesu) buildDockerBesuArgs(dataPath, configPath string) []string {
cmd := []string{
- "besu",
- fmt.Sprintf("--network-id=%d", b.opts.ChainID),
fmt.Sprintf("--data-path=%s", dataPath),
fmt.Sprintf("--genesis-file=%s", filepath.Join(configPath, "genesis.json")),
"--rpc-http-enabled",
+ "--rpc-http-api=ETH,NET,QBFT",
+ "--rpc-http-cors-origins=all",
+ "--rpc-http-host=0.0.0.0",
fmt.Sprintf("--rpc-http-port=%s", b.opts.RPCPort),
- fmt.Sprintf("--p2p-port=%s", b.opts.P2PPort),
- "--rpc-http-api=ADMIN,ETH,NET,PERM,QBFT,WEB3,TXPOOL",
- "--host-allowlist=*",
- "--miner-enabled",
- fmt.Sprintf("--miner-coinbase=%s", b.opts.MinerAddress),
"--min-gas-price=1000000000",
- "--rpc-http-cors-origins=all",
+ fmt.Sprintf("--network-id=%d", b.opts.ChainID),
+ "--host-allowlist=*",
fmt.Sprintf("--node-private-key-file=%s", filepath.Join(configPath, "key")),
- fmt.Sprintf("--p2p-host=%s", b.opts.ListenAddress),
- "--rpc-http-host=0.0.0.0",
+ fmt.Sprintf("--metrics-enabled=%t", b.opts.MetricsEnabled),
+ "--metrics-host=0.0.0.0",
+ fmt.Sprintf("--metrics-port=%d", b.opts.MetricsPort),
+ fmt.Sprintf("--metrics-protocol=%s", b.opts.MetricsProtocol),
+ "--p2p-enabled=true",
+ fmt.Sprintf("--p2p-host=%s", b.opts.P2PHost),
+ fmt.Sprintf("--p2p-port=%s", b.opts.P2PPort),
+ "--nat-method=NONE",
"--discovery-enabled=true",
- "--sync-mode=FULL",
- "--revert-reason-enabled=true",
- "--validator-priority-enabled=true",
+ "--profile=ENTERPRISE",
}
// Add bootnodes if specified
diff --git a/pkg/nodes/besu/types.go b/pkg/nodes/besu/types.go
index 406be4e..59e1e4e 100644
--- a/pkg/nodes/besu/types.go
+++ b/pkg/nodes/besu/types.go
@@ -17,6 +17,10 @@ type StartBesuOpts struct {
BootNodes []string `json:"bootNodes"`
Env map[string]string `json:"env"`
Version string `json:"version"`
+ // Metrics configuration
+ MetricsEnabled bool `json:"metricsEnabled"`
+ MetricsPort int64 `json:"metricsPort"`
+ MetricsProtocol string `json:"metricsProtocol"`
}
// BesuConfig represents the configuration for a Besu node
diff --git a/pkg/nodes/http/handler.go b/pkg/nodes/http/handler.go
index 538e0a1..2e598a1 100644
--- a/pkg/nodes/http/handler.go
+++ b/pkg/nodes/http/handler.go
@@ -60,6 +60,7 @@ func (h *NodeHandler) RegisterRoutes(r chi.Router) {
r.Get("/{id}/logs", h.TailLogs)
r.Get("/{id}/events", response.Middleware(h.GetNodeEvents))
r.Get("/{id}/channels", response.Middleware(h.GetNodeChannels))
+ r.Get("/{id}/channels/{channelID}/chaincodes", response.Middleware(h.GetNodeChaincodes))
r.Post("/{id}/certificates/renew", response.Middleware(h.RenewCertificates))
r.Put("/{id}", response.Middleware(h.UpdateNode))
})
@@ -549,7 +550,7 @@ func (h *NodeHandler) TailLogs(w http.ResponseWriter, r *http.Request) {
return
}
// Write log line to response
- fmt.Fprintf(w, "%s\n\n", logLine)
+ fmt.Fprintf(w, "%s", logLine)
flusher.Flush()
}
}
@@ -912,3 +913,66 @@ func (h *NodeHandler) updateFabricOrderer(w http.ResponseWriter, r *http.Request
return response.WriteJSON(w, http.StatusOK, toNodeResponse(updatedNode))
}
+
+// GetNodeChaincodes godoc
+// @Summary Get committed chaincodes for a Fabric peer
+// @Description Retrieves all committed chaincodes for a specific channel on a Fabric peer node
+// @Tags Nodes
+// @Accept json
+// @Produce json
+// @Param id path int true "Node ID"
+// @Param channelID path string true "Channel ID"
+// @Success 200 {array} ChaincodeResponse
+// @Failure 400 {object} response.ErrorResponse "Validation error"
+// @Failure 404 {object} response.ErrorResponse "Node not found"
+// @Failure 500 {object} response.ErrorResponse "Internal server error"
+// @Router /nodes/{id}/channels/{channelID}/chaincodes [get]
+func (h *NodeHandler) GetNodeChaincodes(w http.ResponseWriter, r *http.Request) error {
+ id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
+ if err != nil {
+ return errors.NewValidationError("invalid node ID", map[string]interface{}{
+ "error": err.Error(),
+ })
+ }
+
+ channelID := chi.URLParam(r, "channelID")
+ if channelID == "" {
+ return errors.NewValidationError("channel ID is required", nil)
+ }
+
+ chaincodes, err := h.service.GetFabricChaincodes(r.Context(), id, channelID)
+ if err != nil {
+ if err == service.ErrNotFound {
+ return errors.NewNotFoundError("node not found", nil)
+ }
+ if err == service.ErrInvalidNodeType {
+ return errors.NewValidationError("node is not a Fabric peer", nil)
+ }
+ return errors.NewInternalError("failed to get chaincodes", err, nil)
+ }
+
+ // Convert chaincodes to response format
+ chaincodeResponses := make([]ChaincodeResponse, len(chaincodes))
+ for i, cc := range chaincodes {
+ chaincodeResponses[i] = ChaincodeResponse{
+ Name: cc.Name,
+ Version: cc.Version,
+ Sequence: cc.Sequence,
+ EndorsementPlugin: cc.EndorsementPlugin,
+ ValidationPlugin: cc.ValidationPlugin,
+ InitRequired: cc.InitRequired,
+ }
+ }
+
+ return response.WriteJSON(w, http.StatusOK, chaincodeResponses)
+}
+
+// ChaincodeResponse represents a committed chaincode in the response
+type ChaincodeResponse struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Sequence int64 `json:"sequence"`
+ EndorsementPlugin string `json:"endorsementPlugin"`
+ ValidationPlugin string `json:"validationPlugin"`
+ InitRequired bool `json:"initRequired"`
+}
diff --git a/pkg/nodes/http/types.go b/pkg/nodes/http/types.go
index 66d2af6..f284546 100644
--- a/pkg/nodes/http/types.go
+++ b/pkg/nodes/http/types.go
@@ -99,17 +99,19 @@ type NodeConfigResponse struct {
// BesuNodeRequest represents the HTTP request for creating a Besu node
type BesuNodeRequest struct {
- NetworkID uint `json:"networkId" validate:"required"`
- P2PPort uint `json:"p2pPort" validate:"required"`
- RPCPort uint `json:"rpcPort" validate:"required"`
- WSPort uint `json:"wsPort" validate:"required"`
- NodePrivKey string `json:"nodePrivKey,omitempty"`
- Bootnodes []string `json:"bootnodes,omitempty"`
- ExternalIP string `json:"externalIp,omitempty"`
- IsBootnode bool `json:"isBootnode"`
- IsValidator bool `json:"isValidator"`
- StaticNodes []string `json:"staticNodes,omitempty"`
- Env map[string]string `json:"env,omitempty"`
+ NetworkID uint `json:"networkId" validate:"required"`
+ P2PPort uint `json:"p2pPort" validate:"required"`
+ RPCPort uint `json:"rpcPort" validate:"required"`
+ WSPort uint `json:"wsPort" validate:"required"`
+ NodePrivKey string `json:"nodePrivKey,omitempty"`
+ Bootnodes []string `json:"bootnodes,omitempty"`
+ ExternalIP string `json:"externalIp,omitempty"`
+ IsBootnode bool `json:"isBootnode"`
+ IsValidator bool `json:"isValidator"`
+ StaticNodes []string `json:"staticNodes,omitempty"`
+ Env map[string]string `json:"env,omitempty"`
+ MetricsEnabled bool `json:"metricsEnabled"`
+ MetricsPort int64 `json:"metricsPort"`
}
// FabricPeerRequest represents the HTTP request for creating a Fabric peer node
@@ -215,15 +217,17 @@ type UpdateFabricOrdererRequest struct {
// UpdateBesuNodeRequest represents the configuration for updating a Besu node
type UpdateBesuNodeRequest struct {
- NetworkID uint `json:"networkId" validate:"required"`
- P2PHost string `json:"p2pHost" validate:"required"`
- P2PPort uint `json:"p2pPort" validate:"required"`
- RPCHost string `json:"rpcHost" validate:"required"`
- RPCPort uint `json:"rpcPort" validate:"required"`
- Bootnodes []string `json:"bootnodes,omitempty"`
- ExternalIP string `json:"externalIp,omitempty"`
- InternalIP string `json:"internalIp,omitempty"`
- Env map[string]string `json:"env,omitempty"`
+ NetworkID uint `json:"networkId" validate:"required"`
+ P2PHost string `json:"p2pHost" validate:"required"`
+ P2PPort uint `json:"p2pPort" validate:"required"`
+ RPCHost string `json:"rpcHost" validate:"required"`
+ RPCPort uint `json:"rpcPort" validate:"required"`
+ Bootnodes []string `json:"bootnodes,omitempty"`
+ ExternalIP string `json:"externalIp,omitempty"`
+ InternalIP string `json:"internalIp,omitempty"`
+ Env map[string]string `json:"env,omitempty"`
+ MetricsEnabled bool `json:"metricsEnabled"`
+ MetricsPort int64 `json:"metricsPort"`
}
type BesuNodeDefaultsResponse struct {
diff --git a/pkg/nodes/nodetypes/types.go b/pkg/nodes/nodetypes/types.go
new file mode 100644
index 0000000..2fe0812
--- /dev/null
+++ b/pkg/nodes/nodetypes/types.go
@@ -0,0 +1,24 @@
+package nodetypes
+
+import (
+ "time"
+
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+)
+
+// Node represents a node with its full configuration
+type Node struct {
+ ID int64 `json:"id"`
+ Name string `json:"name"`
+ BlockchainPlatform types.BlockchainPlatform `json:"platform"`
+ NodeType types.NodeType `json:"nodeType"`
+ Status types.NodeStatus `json:"status"`
+ ErrorMessage string `json:"errorMessage"`
+ Endpoint string `json:"endpoint"`
+ PublicEndpoint string `json:"publicEndpoint"`
+ NodeConfig types.NodeConfig `json:"nodeConfig"`
+ DeploymentConfig types.NodeDeploymentConfig `json:"deploymentConfig"`
+ MSPID string `json:"mspId"`
+ CreatedAt time.Time `json:"createdAt"`
+ UpdatedAt time.Time `json:"updatedAt"`
+}
diff --git a/pkg/nodes/orderer/orderer.go b/pkg/nodes/orderer/orderer.go
index c8aa8a5..b5b5d5e 100644
--- a/pkg/nodes/orderer/orderer.go
+++ b/pkg/nodes/orderer/orderer.go
@@ -7,6 +7,7 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
+ "io"
"net"
"os"
"os/exec"
@@ -25,6 +26,8 @@ import (
"github.com/chainlaunch/chainlaunch/pkg/logger"
"github.com/chainlaunch/chainlaunch/pkg/nodes/types"
settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/client"
"github.com/hyperledger/fabric-admin-sdk/pkg/channel"
"github.com/hyperledger/fabric-admin-sdk/pkg/identity"
"github.com/hyperledger/fabric-admin-sdk/pkg/network"
@@ -138,19 +141,19 @@ func (o *LocalOrderer) Start() (interface{}, error) {
// Build command and environment
cmd := ordererBinary
- env := o.buildOrdererEnvironment(mspConfigPath)
o.logger.Debug("Starting orderer",
"mode", o.mode,
"cmd", cmd,
- "env", env,
"dirPath", dirPath,
)
switch o.mode {
case "service":
+ env := o.buildOrdererEnvironment(mspConfigPath)
return o.startService(cmd, env, dirPath)
case "docker":
+ env := o.buildDockerOrdererEnvironment(mspConfigPath)
return o.startDocker(env, mspConfigPath, dataConfigPath)
default:
return nil, fmt.Errorf("invalid mode: %s", o.mode)
@@ -222,6 +225,49 @@ func (o *LocalOrderer) buildOrdererEnvironment(mspConfigPath string) map[string]
return env
}
+// buildDockerOrdererEnvironment builds the environment variables for the orderer in docker mode
+func (o *LocalOrderer) buildDockerOrdererEnvironment(mspConfigPath string) map[string]string {
+ env := make(map[string]string)
+
+ // Add custom environment variables from opts
+ for k, v := range o.opts.Env {
+ env[k] = v
+ }
+
+ // Add required environment variables with docker paths
+ env["FABRIC_CFG_PATH"] = "/etc/hyperledger/fabric/msp"
+ env["ORDERER_ADMIN_TLS_CLIENTROOTCAS"] = "/etc/hyperledger/fabric/msp/tlscacerts/cacert.pem"
+ env["ORDERER_ADMIN_TLS_PRIVATEKEY"] = "/etc/hyperledger/fabric/msp/tls.key"
+ env["ORDERER_ADMIN_TLS_CERTIFICATE"] = "/etc/hyperledger/fabric/msp/tls.crt"
+ env["ORDERER_ADMIN_TLS_ROOTCAS"] = "/etc/hyperledger/fabric/msp/tlscacerts/cacert.pem"
+ env["ORDERER_FILELEDGER_LOCATION"] = "/var/hyperledger/production/data"
+ env["ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE"] = "/etc/hyperledger/fabric/msp/tls.crt"
+ env["ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY"] = "/etc/hyperledger/fabric/msp/tls.key"
+ env["ORDERER_GENERAL_CLUSTER_ROOTCAS"] = "/etc/hyperledger/fabric/msp/tlscacerts/cacert.pem"
+ env["ORDERER_GENERAL_LOCALMSPDIR"] = "/etc/hyperledger/fabric/msp"
+ env["ORDERER_GENERAL_TLS_CLIENTROOTCAS"] = "/etc/hyperledger/fabric/msp/tlscacerts/cacert.pem"
+ env["ORDERER_GENERAL_TLS_CERTIFICATE"] = "/etc/hyperledger/fabric/msp/tls.crt"
+ env["ORDERER_GENERAL_TLS_PRIVATEKEY"] = "/etc/hyperledger/fabric/msp/tls.key"
+ env["ORDERER_GENERAL_TLS_ROOTCAS"] = "/etc/hyperledger/fabric/msp/tlscacerts/cacert.pem"
+ env["ORDERER_ADMIN_LISTENADDRESS"] = o.opts.AdminListenAddress
+ env["ORDERER_GENERAL_LISTENADDRESS"] = strings.Split(o.opts.ListenAddress, ":")[0]
+ env["ORDERER_OPERATIONS_LISTENADDRESS"] = o.opts.OperationsListenAddress
+ env["ORDERER_GENERAL_LOCALMSPID"] = o.mspID
+ env["ORDERER_GENERAL_LISTENPORT"] = strings.Split(o.opts.ListenAddress, ":")[1]
+ env["ORDERER_ADMIN_TLS_ENABLED"] = "true"
+ env["ORDERER_CHANNELPARTICIPATION_ENABLED"] = "true"
+ env["ORDERER_GENERAL_BOOTSTRAPMETHOD"] = "none"
+ env["ORDERER_GENERAL_GENESISPROFILE"] = "initial"
+ env["ORDERER_GENERAL_LEDGERTYPE"] = "file"
+ env["FABRIC_LOGGING_SPEC"] = "info"
+ env["ORDERER_GENERAL_TLS_CLIENTAUTHREQUIRED"] = "false"
+ env["ORDERER_GENERAL_TLS_ENABLED"] = "true"
+ env["ORDERER_METRICS_PROVIDER"] = "prometheus"
+ env["ORDERER_OPERATIONS_TLS_ENABLED"] = "false"
+
+ return env
+}
+
func (o *LocalOrderer) getLogPath() string {
return o.GetStdOutPath()
}
@@ -229,72 +275,110 @@ func (o *LocalOrderer) getLogPath() string {
// TailLogs tails the logs of the orderer service
func (o *LocalOrderer) TailLogs(ctx context.Context, tail int, follow bool) (<-chan string, error) {
logChan := make(chan string, 100)
- logPath := o.GetStdOutPath()
- // Check if log file exists
+ if o.mode == "docker" {
+ containerName := strings.ReplaceAll(strings.ToLower(o.opts.ID), " ", "-")
+		// NOTE(review): duplicates getContainerName() in service.go — keep the two in sync (or call the helper)
+ go func() {
+ defer close(logChan)
+ cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+ if err != nil {
+ o.logger.Error("Failed to create docker client", "error", err)
+ return
+ }
+ defer cli.Close()
+
+ options := container.LogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Follow: follow,
+ Details: true,
+ Tail: fmt.Sprintf("%d", tail),
+ }
+ reader, err := cli.ContainerLogs(ctx, containerName, options)
+ if err != nil {
+ o.logger.Error("Failed to get docker logs", "error", err)
+ return
+ }
+ defer reader.Close()
+
+ header := make([]byte, 8)
+ for {
+ _, err := io.ReadFull(reader, header)
+ if err != nil {
+ if err != io.EOF {
+ o.logger.Error("Failed to read docker log header", "error", err)
+ }
+ return
+ }
+ length := int(uint32(header[4])<<24 | uint32(header[5])<<16 | uint32(header[6])<<8 | uint32(header[7]))
+ if length == 0 {
+ continue
+ }
+ payload := make([]byte, length)
+ _, err = io.ReadFull(reader, payload)
+ if err != nil {
+ if err != io.EOF {
+ o.logger.Error("Failed to read docker log payload", "error", err)
+ }
+ return
+ }
+ select {
+ case <-ctx.Done():
+ return
+ case logChan <- string(payload):
+ }
+ }
+ }()
+ return logChan, nil
+ }
+
+ logPath := o.GetStdOutPath()
if _, err := os.Stat(logPath); os.IsNotExist(err) {
close(logChan)
return logChan, fmt.Errorf("log file does not exist: %s", logPath)
}
-
- // Start goroutine to tail logs
go func() {
defer close(logChan)
-
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
- // For Windows, use PowerShell Get-Content
if follow {
cmd = exec.Command("powershell", "Get-Content", "-Path", logPath, "-Tail", fmt.Sprintf("%d", tail), "-Wait")
} else {
cmd = exec.Command("powershell", "Get-Content", "-Path", logPath, "-Tail", fmt.Sprintf("%d", tail))
}
} else {
- // For Unix-like systems, use tail command
if follow {
cmd = exec.Command("tail", "-n", fmt.Sprintf("%d", tail), "-f", logPath)
} else {
cmd = exec.Command("tail", "-n", fmt.Sprintf("%d", tail), logPath)
}
}
-
- // Create pipe for reading command output
stdout, err := cmd.StdoutPipe()
if err != nil {
o.logger.Error("Failed to create stdout pipe", "error", err)
return
}
-
- // Start the command
if err := cmd.Start(); err != nil {
o.logger.Error("Failed to start tail command", "error", err)
return
}
-
- // Create scanner to read output line by line
scanner := bufio.NewScanner(stdout)
scanner.Split(bufio.ScanLines)
-
- // Read lines and send to channel
for scanner.Scan() {
select {
case <-ctx.Done():
- // Context cancelled, stop tailing
cmd.Process.Kill()
return
- case logChan <- scanner.Text():
- // Line sent successfully
+ case logChan <- scanner.Text() + "\n":
}
}
-
- // Wait for command to complete
if err := cmd.Wait(); err != nil {
- if ctx.Err() == nil { // Only log error if context wasn't cancelled
+ if ctx.Err() == nil {
o.logger.Error("Tail command failed", "error", err)
}
}
}()
-
return logChan, nil
}
diff --git a/pkg/nodes/orderer/service.go b/pkg/nodes/orderer/service.go
index dddbf08..23ac8da 100644
--- a/pkg/nodes/orderer/service.go
+++ b/pkg/nodes/orderer/service.go
@@ -2,12 +2,21 @@ package orderer
import (
"bytes"
+ "context"
"fmt"
+ "io"
"os"
"os/exec"
"path/filepath"
"runtime"
+ "strings"
"text/template"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/mount"
+ dockerclient "github.com/docker/docker/client"
+ "github.com/docker/go-connections/nat"
)
// startService starts the orderer as a system service
@@ -251,14 +260,130 @@ func (o *LocalOrderer) execSystemctl(command string, args ...string) error {
return nil
}
+// getContainerName returns the docker container name for the orderer
+func (o *LocalOrderer) getContainerName() string {
+ return strings.ReplaceAll(strings.ToLower(o.opts.ID), " ", "-")
+}
+
// startDocker starts the orderer in a docker container
func (o *LocalOrderer) startDocker(env map[string]string, mspConfigPath, dataConfigPath string) (*StartDockerResponse, error) {
- // TODO: Implement docker mode
- return nil, fmt.Errorf("docker mode not implemented")
+ cli, err := dockerclient.NewClientWithOpts(
+ dockerclient.FromEnv,
+ dockerclient.WithAPIVersionNegotiation(),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create docker client: %w", err)
+ }
+ defer cli.Close()
+
+ // Pull the image first
+ imageName := fmt.Sprintf("hyperledger/fabric-orderer:%s", o.opts.Version)
+ reader, err := cli.ImagePull(context.Background(), imageName, image.PullOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to pull image %s: %w", imageName, err)
+ }
+ defer reader.Close()
+	io.Copy(io.Discard, reader) // Wait for pull to complete; copy error deliberately ignored — a failed pull surfaces at ContainerCreate
+
+ containerName := o.getContainerName()
+
+ // Helper to extract port from address (host:port or just :port)
+ extractPort := func(addr string) string {
+ parts := strings.Split(addr, ":")
+ if len(parts) > 1 {
+ return parts[len(parts)-1]
+ }
+ return addr
+ }
+
+ listenPort := extractPort(o.opts.ListenAddress)
+ adminPort := extractPort(o.opts.AdminListenAddress)
+ operationsPort := extractPort(o.opts.OperationsListenAddress)
+
+ // Configure port bindings
+ portBindings := map[nat.Port][]nat.PortBinding{
+ nat.Port(listenPort): {{HostIP: "0.0.0.0", HostPort: listenPort}},
+ nat.Port(adminPort): {{HostIP: "0.0.0.0", HostPort: adminPort}},
+ nat.Port(operationsPort): {{HostIP: "0.0.0.0", HostPort: operationsPort}},
+ }
+
+ // Configure volume bindings
+ mounts := []mount.Mount{
+ {
+ Type: mount.TypeBind,
+ Source: mspConfigPath,
+ Target: "/etc/hyperledger/fabric/msp",
+ },
+ {
+ Type: mount.TypeBind,
+ Source: dataConfigPath,
+ Target: "/var/hyperledger/production",
+ },
+ }
+ containerConfig := &container.Config{
+ Image: imageName,
+ Cmd: []string{"orderer"},
+ Env: mapToEnvSlice(env),
+ ExposedPorts: map[nat.Port]struct{}{},
+ }
+ for port := range portBindings {
+ containerConfig.ExposedPorts[port] = struct{}{}
+ }
+ // Create container
+ resp, err := cli.ContainerCreate(context.Background(),
+ containerConfig,
+ &container.HostConfig{
+ PortBindings: portBindings,
+ Mounts: mounts,
+ },
+ nil,
+ nil,
+ containerName,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create container: %w", err)
+ }
+
+ // Start container
+ if err := cli.ContainerStart(context.Background(), resp.ID, container.StartOptions{}); err != nil {
+ return nil, fmt.Errorf("failed to start container: %w", err)
+ }
+
+ return &StartDockerResponse{
+ Mode: "docker",
+ ContainerName: containerName,
+ }, nil
+}
+
+func mapToEnvSlice(m map[string]string) []string {
+ var env []string
+ for k, v := range m {
+ env = append(env, fmt.Sprintf("%s=%s", k, v))
+ }
+ return env
}
-// stopDocker stops the orderer docker container
func (o *LocalOrderer) stopDocker() error {
- // TODO: Implement docker mode
- return fmt.Errorf("docker mode not implemented")
+ containerName := o.getContainerName()
+
+ cli, err := dockerclient.NewClientWithOpts(
+ dockerclient.FromEnv,
+ dockerclient.WithAPIVersionNegotiation(),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to create docker client: %w", err)
+ }
+ defer cli.Close()
+
+ ctx := context.Background()
+
+ // Stop and remove container
+ if err := cli.ContainerRemove(ctx, containerName, container.RemoveOptions{
+ Force: true,
+ }); err != nil {
+ o.logger.Warn("Failed to remove docker container", "error", err)
+ // Don't return error as container might not exist
+ }
+
+ return nil
}
diff --git a/pkg/nodes/peer/peer.go b/pkg/nodes/peer/peer.go
index fb2a55e..97dcc36 100644
--- a/pkg/nodes/peer/peer.go
+++ b/pkg/nodes/peer/peer.go
@@ -19,6 +19,7 @@ import (
// add sprig/v3
"github.com/Masterminds/sprig/v3"
+ "github.com/hyperledger/fabric-admin-sdk/pkg/chaincode"
"github.com/hyperledger/fabric-admin-sdk/pkg/channel"
"github.com/hyperledger/fabric-admin-sdk/pkg/identity"
"github.com/hyperledger/fabric-admin-sdk/pkg/network"
@@ -27,9 +28,12 @@ import (
gwidentity "github.com/hyperledger/fabric-gateway/pkg/identity"
cb "github.com/hyperledger/fabric-protos-go-apiv2/common"
"github.com/hyperledger/fabric-protos-go-apiv2/orderer"
+ "github.com/hyperledger/fabric-protos-go-apiv2/peer/lifecycle"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
+ "io"
+
"github.com/chainlaunch/chainlaunch/internal/protoutil"
"github.com/chainlaunch/chainlaunch/pkg/binaries"
"github.com/chainlaunch/chainlaunch/pkg/config"
@@ -38,8 +42,13 @@ import (
kmodels "github.com/chainlaunch/chainlaunch/pkg/keymanagement/models"
keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service"
"github.com/chainlaunch/chainlaunch/pkg/logger"
- "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+ nodetypes "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/mount"
+ dockerclient "github.com/docker/docker/client"
+ "github.com/docker/go-connections/nat"
)
type AddressOverridePath struct {
@@ -886,7 +895,7 @@ func (p *LocalPeer) findPeerBinary() (string, error) {
}
// Init initializes the peer configuration
-func (p *LocalPeer) Init() (types.NodeDeploymentConfig, error) {
+func (p *LocalPeer) Init() (nodetypes.NodeDeploymentConfig, error) {
ctx := context.Background()
// Get node from database
node, err := p.db.GetNode(ctx, p.nodeID)
@@ -1046,8 +1055,8 @@ func (p *LocalPeer) Init() (types.NodeDeploymentConfig, error) {
return nil, fmt.Errorf("failed to write config files: %w", err)
}
- return &types.FabricPeerDeploymentConfig{
- BaseDeploymentConfig: types.BaseDeploymentConfig{
+ return &nodetypes.FabricPeerDeploymentConfig{
+ BaseDeploymentConfig: nodetypes.BaseDeploymentConfig{
Type: "fabric-peer",
Mode: p.mode,
},
@@ -1137,6 +1146,7 @@ func (p *LocalPeer) buildPeerEnvironment(mspConfigPath string) map[string]string
}
// Add required environment variables
+ // Default: use host paths
env["CORE_PEER_MSPCONFIGPATH"] = mspConfigPath
env["FABRIC_CFG_PATH"] = mspConfigPath
env["CORE_PEER_TLS_ROOTCERT_FILE"] = filepath.Join(mspConfigPath, "tlscacerts/cacert.pem")
@@ -1178,48 +1188,109 @@ func (p *LocalPeer) buildPeerEnvironment(mspConfigPath string) map[string]string
env["CORE_LOGGING_GRPC"] = "info"
env["CORE_LOGGING_PEER"] = "info"
+ // If running in docker mode, override file paths to container paths
+ if p.mode == "docker" {
+ env["CORE_PEER_MSPCONFIGPATH"] = "/etc/hyperledger/fabric/msp"
+ env["FABRIC_CFG_PATH"] = "/etc/hyperledger/fabric/msp"
+ env["CORE_PEER_TLS_ROOTCERT_FILE"] = "/etc/hyperledger/fabric/msp/tlscacerts/cacert.pem"
+ env["CORE_PEER_TLS_KEY_FILE"] = "/etc/hyperledger/fabric/msp/tls.key"
+ env["CORE_PEER_TLS_CLIENTCERT_FILE"] = "/etc/hyperledger/fabric/msp/tls.crt"
+ env["CORE_PEER_TLS_CLIENTKEY_FILE"] = "/etc/hyperledger/fabric/msp/tls.key"
+ env["CORE_PEER_TLS_CERT_FILE"] = "/etc/hyperledger/fabric/msp/tls.crt"
+ env["CORE_PEER_TLS_CLIENTROOTCAS_FILES"] = "/etc/hyperledger/fabric/msp/tlscacerts/cacert.pem"
+ }
+
return env
}
// startDocker starts the peer in a docker container
func (p *LocalPeer) startDocker(env map[string]string, mspConfigPath, dataConfigPath string) (*StartDockerResponse, error) {
- // Convert env map to array of "-e KEY=VALUE" arguments
- var envArgs []string
- for k, v := range env {
- envArgs = append(envArgs, "-e", fmt.Sprintf("%s=%s", k, v))
+ cli, err := dockerclient.NewClientWithOpts(
+ dockerclient.FromEnv,
+ dockerclient.WithAPIVersionNegotiation(),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create docker client: %w", err)
}
+ defer cli.Close()
+
+ // Pull the image first
+ imageName := fmt.Sprintf("hyperledger/fabric-peer:%s", p.opts.Version)
+ reader, err := cli.ImagePull(context.Background(), imageName, image.PullOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to pull image %s: %w", imageName, err)
+ }
+ defer reader.Close()
+ io.Copy(io.Discard, reader) // Wait for pull to complete
containerName, err := p.getContainerName()
if err != nil {
return nil, fmt.Errorf("failed to get container name: %w", err)
}
- // Prepare docker run command arguments
- args := []string{
- "run",
- "-d",
- "--name", containerName,
- }
- args = append(args, envArgs...)
- args = append(args,
- "-v", fmt.Sprintf("%s:/etc/hyperledger/fabric/msp", mspConfigPath),
- "-v", fmt.Sprintf("%s:/var/hyperledger/production", dataConfigPath),
- "-p", fmt.Sprintf("%s:7051", strings.Split(p.opts.ListenAddress, ":")[1]),
- "-p", fmt.Sprintf("%s:7052", strings.Split(p.opts.ChaincodeAddress, ":")[1]),
- "-p", fmt.Sprintf("%s:7053", strings.Split(p.opts.EventsAddress, ":")[1]),
- "-p", fmt.Sprintf("%s:9443", strings.Split(p.opts.OperationsListenAddress, ":")[1]),
- "hyperledger/fabric-peer:2.5.9",
- "peer",
- "node",
- "start",
- )
+ // Helper to extract port from address (host:port or just :port)
+ extractPort := func(addr string) string {
+ parts := strings.Split(addr, ":")
+ if len(parts) > 1 {
+ return parts[len(parts)-1]
+ }
+ return addr
+ }
- cmd := exec.Command("docker", args...)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
+ peerPort := extractPort(p.opts.ListenAddress)
+ chaincodePort := extractPort(p.opts.ChaincodeAddress)
+ eventsPort := extractPort(p.opts.EventsAddress)
+ operationsPort := extractPort(p.opts.OperationsListenAddress)
- if err := cmd.Run(); err != nil {
- return nil, fmt.Errorf("failed to start docker container: %w", err)
+ // Configure port bindings
+ portBindings := map[nat.Port][]nat.PortBinding{
+ nat.Port(peerPort): {{HostIP: "0.0.0.0", HostPort: peerPort}},
+ nat.Port(chaincodePort): {{HostIP: "0.0.0.0", HostPort: chaincodePort}},
+ nat.Port(eventsPort): {{HostIP: "0.0.0.0", HostPort: eventsPort}},
+ nat.Port(operationsPort): {{HostIP: "0.0.0.0", HostPort: operationsPort}},
+ }
+
+ // Configure volume bindings
+ mounts := []mount.Mount{
+ {
+ Type: mount.TypeBind,
+ Source: mspConfigPath,
+ Target: "/etc/hyperledger/fabric/msp",
+ },
+ {
+ Type: mount.TypeBind,
+ Source: dataConfigPath,
+ Target: "/var/hyperledger/production",
+ },
+ }
+ containerConfig := &container.Config{
+ Image: imageName,
+ Cmd: []string{"peer", "node", "start"},
+ Env: mapToEnvSlice(env),
+ ExposedPorts: map[nat.Port]struct{}{},
+ }
+ for port := range portBindings {
+ containerConfig.ExposedPorts[port] = struct{}{}
+ }
+
+ // Create container
+ resp, err := cli.ContainerCreate(context.Background(),
+ containerConfig,
+ &container.HostConfig{
+ PortBindings: portBindings,
+ Mounts: mounts,
+ },
+ nil,
+ nil,
+ containerName,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create container: %w", err)
+ }
+
+ // Start container
+ if err := cli.ContainerStart(context.Background(), resp.ID, container.StartOptions{}); err != nil {
+ return nil, fmt.Errorf("failed to start container: %w", err)
}
return &StartDockerResponse{
@@ -1228,6 +1299,15 @@ func (p *LocalPeer) startDocker(env map[string]string, mspConfigPath, dataConfig
}, nil
}
+// mapToEnvSlice converts an environment map into the "KEY=VALUE" slice
+// form expected by the docker container.Config.Env field.
+// Map iteration order is unspecified in Go, so the slice order is
+// non-deterministic (harmless for environment variables).
+func mapToEnvSlice(m map[string]string) []string {
+ var env []string
+ for k, v := range m {
+ env = append(env, fmt.Sprintf("%s=%s", k, v))
+ }
+ return env
+}
+
// Stop stops the peer node
func (p *LocalPeer) Stop() error {
if p.mode == "service" {
@@ -1269,17 +1349,23 @@ func (p *LocalPeer) stopDocker() error {
return fmt.Errorf("failed to get container name: %w", err)
}
- // Stop the container
- stopCmd := exec.Command("docker", "stop", containerName)
- if err := stopCmd.Run(); err != nil {
- return fmt.Errorf("failed to stop docker container: %w", err)
+ cli, err := dockerclient.NewClientWithOpts(
+ dockerclient.FromEnv,
+ dockerclient.WithAPIVersionNegotiation(),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to create docker client: %w", err)
}
+ defer cli.Close()
- // Remove the container
- rmCmd := exec.Command("docker", "rm", "-f", containerName)
- if err := rmCmd.Run(); err != nil {
+ ctx := context.Background()
+
+ // Stop and remove container
+ if err := cli.ContainerRemove(ctx, containerName, container.RemoveOptions{
+ Force: true,
+ }); err != nil {
p.logger.Warn("Failed to remove docker container", "error", err)
- // Don't return error as the container might not exist
+ // Don't return error as container might not exist
}
return nil
@@ -1360,7 +1446,7 @@ func (p *LocalPeer) execSystemctl(command string, args ...string) error {
}
// RenewCertificates renews the peer's TLS and signing certificates
-func (p *LocalPeer) RenewCertificates(peerDeploymentConfig *types.FabricPeerDeploymentConfig) error {
+func (p *LocalPeer) RenewCertificates(peerDeploymentConfig *nodetypes.FabricPeerDeploymentConfig) error {
ctx := context.Background()
p.logger.Info("Starting certificate renewal for peer", "peerID", p.opts.ID)
@@ -1501,12 +1587,6 @@ func (p *LocalPeer) RenewCertificates(peerDeploymentConfig *types.FabricPeerDepl
return fmt.Errorf("failed to write renewed certificates: %w", err)
}
- // Restart the peer
- _, err = p.Start()
- if err != nil {
- return fmt.Errorf("failed to restart peer after certificate renewal: %w", err)
- }
-
p.logger.Info("Successfully renewed peer certificates", "peerID", p.opts.ID)
p.logger.Info("Restarting peer after certificate renewal")
// Stop the peer before renewing certificates
@@ -1911,6 +1991,18 @@ const configYamlContent = `NodeOUs:
OrganizationalUnitIdentifier: orderer
`
+// CoreTemplateData holds the values substituted into the core.yaml
+// template when rendering a peer's configuration. DataPath and
+// ExternalBuilderPath differ by mode: "docker" mode uses fixed
+// in-container paths, service mode uses host filesystem paths.
+type CoreTemplateData struct {
+ PeerID string // peer identifier (opts.ID)
+ ListenAddress string
+ ChaincodeAddress string
+ ExternalEndpoint string
+ DataPath string // ledger/production data directory
+ MSPID string
+ ExternalBuilderPath string // external (ccaas) chaincode builder location
+ OperationsListenAddress string
+ AddressOverrides []AddressOverridePath
+}
+
// writeConfigFiles writes the config.yaml and core.yaml files
func (p *LocalPeer) writeConfigFiles(mspConfigPath, dataConfigPath string) error {
// Write config.yaml
@@ -1921,30 +2013,33 @@ func (p *LocalPeer) writeConfigFiles(mspConfigPath, dataConfigPath string) error
if err != nil {
return fmt.Errorf("failed to convert address overrides: %w", err)
}
-
- // Define template data
- data := struct {
- PeerID string
- ListenAddress string
- ChaincodeAddress string
- ExternalEndpoint string
- DataPath string
- MSPID string
- ExternalBuilderPath string
- OperationsListenAddress string
- AddressOverrides []AddressOverridePath
- }{
- PeerID: p.opts.ID,
- ListenAddress: p.opts.ListenAddress,
- ChaincodeAddress: p.opts.ChaincodeAddress,
- ExternalEndpoint: p.opts.ExternalEndpoint,
- DataPath: dataConfigPath,
- MSPID: p.mspID,
- ExternalBuilderPath: filepath.Join(mspConfigPath, "ccaas"),
- OperationsListenAddress: p.opts.OperationsListenAddress,
- AddressOverrides: convertedOverrides,
+ var data CoreTemplateData
+ if p.mode == "docker" {
+ data = CoreTemplateData{
+ PeerID: p.opts.ID,
+ ListenAddress: p.opts.ListenAddress,
+ ChaincodeAddress: p.opts.ChaincodeAddress,
+ ExternalEndpoint: p.opts.ExternalEndpoint,
+ DataPath: "/var/hyperledger/production",
+ ExternalBuilderPath: "/opt/hyperledger/ccaas_builder",
+ OperationsListenAddress: p.opts.OperationsListenAddress,
+ AddressOverrides: convertedOverrides,
+ MSPID: p.mspID,
+ }
+ } else {
+ // Define template data
+ data = CoreTemplateData{
+ PeerID: p.opts.ID,
+ ListenAddress: p.opts.ListenAddress,
+ ChaincodeAddress: p.opts.ChaincodeAddress,
+ ExternalEndpoint: p.opts.ExternalEndpoint,
+ DataPath: dataConfigPath,
+ MSPID: p.mspID,
+ ExternalBuilderPath: filepath.Join(mspConfigPath, "ccaas"),
+ OperationsListenAddress: p.opts.OperationsListenAddress,
+ AddressOverrides: convertedOverrides,
+ }
}
-
// Create template
tmpl, err := template.New("core.yaml").Parse(coreYamlTemplate)
if err != nil {
@@ -1972,18 +2067,80 @@ func (p *LocalPeer) getLogPath() string {
// TailLogs tails the logs of the peer service
func (p *LocalPeer) TailLogs(ctx context.Context, tail int, follow bool) (<-chan string, error) {
logChan := make(chan string, 100)
- logPath := p.GetStdOutPath()
+ if p.mode == "docker" {
+ containerName, err := p.getContainerName()
+ if err != nil {
+ close(logChan)
+ return logChan, err
+ }
+ go func() {
+ defer close(logChan)
+ cli, err := dockerclient.NewClientWithOpts(dockerclient.FromEnv, dockerclient.WithAPIVersionNegotiation())
+ if err != nil {
+ p.logger.Error("Failed to create docker client", "error", err)
+ return
+ }
+ defer cli.Close()
+
+ options := container.LogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Follow: follow,
+ Details: true,
+ Tail: fmt.Sprintf("%d", tail),
+ }
+ reader, err := cli.ContainerLogs(ctx, containerName, options)
+ if err != nil {
+ p.logger.Error("Failed to get docker logs", "error", err)
+ return
+ }
+ defer reader.Close()
+
+ header := make([]byte, 8)
+ for {
+ // Read the 8-byte header
+ _, err := io.ReadFull(reader, header)
+ if err != nil {
+ if err != io.EOF {
+ p.logger.Error("Failed to read docker log header", "error", err)
+ }
+ return
+ }
+ // Get the payload length
+ length := int(uint32(header[4])<<24 | uint32(header[5])<<16 | uint32(header[6])<<8 | uint32(header[7]))
+ if length == 0 {
+ continue
+ }
+ // Read the payload
+ payload := make([]byte, length)
+ _, err = io.ReadFull(reader, payload)
+ if err != nil {
+ if err != io.EOF {
+ p.logger.Error("Failed to read docker log payload", "error", err)
+ }
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ return
+ case logChan <- string(payload):
+ }
+ }
+ }()
+ return logChan, nil
+ }
+
+ // Service mode: use file tailing
+ logPath := p.GetStdOutPath()
// Check if log file exists
if _, err := os.Stat(logPath); os.IsNotExist(err) {
close(logChan)
return logChan, fmt.Errorf("log file does not exist: %s", logPath)
}
-
- // Start goroutine to tail logs
go func() {
defer close(logChan)
-
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
// For Windows, use PowerShell Get-Content
@@ -2000,44 +2157,31 @@ func (p *LocalPeer) TailLogs(ctx context.Context, tail int, follow bool) (<-chan
cmd = exec.Command("tail", "-n", fmt.Sprintf("%d", tail), logPath)
}
}
-
- // Create pipe for reading command output
stdout, err := cmd.StdoutPipe()
if err != nil {
p.logger.Error("Failed to create stdout pipe", "error", err)
return
}
-
- // Start the command
if err := cmd.Start(); err != nil {
p.logger.Error("Failed to start tail command", "error", err)
return
}
-
- // Create scanner to read output line by line
scanner := bufio.NewScanner(stdout)
scanner.Split(bufio.ScanLines)
-
- // Read lines and send to channel
for scanner.Scan() {
select {
case <-ctx.Done():
- // Context cancelled, stop tailing
cmd.Process.Kill()
return
- case logChan <- scanner.Text():
- // Line sent successfully
+ case logChan <- scanner.Text() + "\n":
}
}
-
- // Wait for command to complete
if err := cmd.Wait(); err != nil {
- if ctx.Err() == nil { // Only log error if context wasn't cancelled
+ if ctx.Err() == nil {
p.logger.Error("Tail command failed", "error", err)
}
}
}()
-
return logChan, nil
}
@@ -2329,7 +2473,6 @@ func SignConfigTx(channelID string, envConfigUpdate *cb.Envelope, signer identit
return protoutil.CreateSignedEnvelope(cb.HeaderType_CONFIG_UPDATE, channelID, signer, configUpdateEnv, msgVersion, epoch)
}
-
func Concatenate[T any](slices ...[]T) []T {
size := 0
for _, slice := range slices {
@@ -2727,8 +2870,6 @@ func (p *LocalPeer) GetBlockTransactions(ctx context.Context, channelID string,
return nil, fmt.Errorf("block not found")
}
-
-
// GetBlocksInRange retrieves blocks from startBlock to endBlock (inclusive)
func (p *LocalPeer) GetBlocksInRange(ctx context.Context, channelID string, startBlock, endBlock uint64) ([]*cb.Block, error) {
peerUrl := p.GetPeerAddress()
@@ -2896,7 +3037,7 @@ func (p *LocalPeer) GetChannelInfoOnPeer(ctx context.Context, channelID string)
}
// SynchronizeConfig synchronizes the peer's configuration files and service
-func (p *LocalPeer) SynchronizeConfig(deployConfig *types.FabricPeerDeploymentConfig) error {
+func (p *LocalPeer) SynchronizeConfig(deployConfig *nodetypes.FabricPeerDeploymentConfig) error {
slugifiedID := strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")
dirPath := filepath.Join(p.configService.GetDataPath(), "peers", slugifiedID)
mspConfigPath := filepath.Join(dirPath, "config")
@@ -2910,28 +3051,34 @@ func (p *LocalPeer) SynchronizeConfig(deployConfig *types.FabricPeerDeploymentCo
return fmt.Errorf("failed to convert address overrides: %w", err)
}
- // Define template data
- data := struct {
- PeerID string
- ListenAddress string
- ChaincodeAddress string
- ExternalEndpoint string
- DataPath string
- MSPID string
- ExternalBuilderPath string
- OperationsListenAddress string
- AddressOverrides []AddressOverridePath
- }{
- PeerID: p.opts.ID,
- ListenAddress: deployConfig.ListenAddress,
- ChaincodeAddress: deployConfig.ChaincodeAddress,
- ExternalEndpoint: deployConfig.ExternalEndpoint,
- DataPath: dataConfigPath,
- MSPID: deployConfig.MSPID,
- ExternalBuilderPath: filepath.Join(mspConfigPath, "ccaas"),
- OperationsListenAddress: deployConfig.OperationsListenAddress,
- AddressOverrides: convertedOverrides,
+ var data CoreTemplateData
+ if p.mode == "docker" {
+ data = CoreTemplateData{
+ PeerID: p.opts.ID,
+ ListenAddress: p.opts.ListenAddress,
+ ChaincodeAddress: p.opts.ChaincodeAddress,
+ ExternalEndpoint: p.opts.ExternalEndpoint,
+ DataPath: "/var/hyperledger/production",
+ ExternalBuilderPath: "/opt/hyperledger/ccaas_builder",
+ OperationsListenAddress: p.opts.OperationsListenAddress,
+ AddressOverrides: convertedOverrides,
+ MSPID: p.mspID,
+ }
+ } else {
+ // Define template data
+ data = CoreTemplateData{
+ PeerID: p.opts.ID,
+ ListenAddress: p.opts.ListenAddress,
+ ChaincodeAddress: p.opts.ChaincodeAddress,
+ ExternalEndpoint: p.opts.ExternalEndpoint,
+ DataPath: dataConfigPath,
+ MSPID: p.mspID,
+ ExternalBuilderPath: filepath.Join(mspConfigPath, "ccaas"),
+ OperationsListenAddress: p.opts.OperationsListenAddress,
+ AddressOverrides: convertedOverrides,
+ }
}
+
// Create template
tmpl, err := template.New("core.yaml").Parse(coreYamlTemplate)
if err != nil {
@@ -2962,7 +3109,7 @@ func (p *LocalPeer) SynchronizeConfig(deployConfig *types.FabricPeerDeploymentCo
}
// Add this new function
-func (p *LocalPeer) convertAddressOverrides(mspConfigPath string, overrides []types.AddressOverride) ([]AddressOverridePath, error) {
+func (p *LocalPeer) convertAddressOverrides(mspConfigPath string, overrides []nodetypes.AddressOverride) ([]AddressOverridePath, error) {
// Create temporary directory for override certificates
tmpDir := filepath.Join(mspConfigPath, "orderer-overrides")
if err := os.MkdirAll(tmpDir, 0755); err != nil {
@@ -2987,3 +3134,30 @@ func (p *LocalPeer) convertAddressOverrides(mspConfigPath string, overrides []ty
return convertedOverrides, nil
}
+
+// GetCommittedChaincodes returns all chaincode definitions committed on
+// the given channel, queried through a lifecycle gateway connection to
+// this peer. The connection is authenticated with the peer's TLS root CA
+// and signed with the org admin identity; it is closed before returning.
+func (p *LocalPeer) GetCommittedChaincodes(ctx context.Context, channelID string) ([]*lifecycle.QueryChaincodeDefinitionsResult_ChaincodeDefinition, error) {
+ peerUrl := p.GetPeerAddress()
+ tlsCACert, err := p.GetTLSRootCACert(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get TLS CA cert: %w", err)
+ }
+
+ peerConn, err := p.CreatePeerConnection(ctx, peerUrl, tlsCACert)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create peer connection: %w", err)
+ }
+ defer peerConn.Close()
+
+ adminIdentity, _, err := p.GetAdminIdentity(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get admin identity: %w", err)
+ }
+
+ peer := chaincode.NewGateway(peerConn, adminIdentity)
+ committedChaincodes, err := peer.QueryCommitted(ctx, channelID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to query committed chaincodes: %w", err)
+ }
+
+ return committedChaincodes.GetChaincodeDefinitions(), nil
+}
diff --git a/pkg/nodes/service/besu.go b/pkg/nodes/service/besu.go
new file mode 100644
index 0000000..dcbb3f0
--- /dev/null
+++ b/pkg/nodes/service/besu.go
@@ -0,0 +1,641 @@
+package service
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ "github.com/chainlaunch/chainlaunch/pkg/errors"
+ networktypes "github.com/chainlaunch/chainlaunch/pkg/networks/service/types"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/besu"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/utils"
+)
+
+// GetBesuPorts attempts to find available ports for P2P and RPC, starting from default ports.
+// Each search scans up to maxAttempts (100) ports above its base and
+// returns the first free port found; an error is returned if no free
+// port exists in either range.
+func GetBesuPorts(baseP2PPort, baseRPCPort uint) (p2pPort uint, rpcPort uint, err error) {
+ maxAttempts := 100
+ // Try to find available ports for P2P and RPC
+ p2pPorts, err := findConsecutivePorts(int(baseP2PPort), 1, int(baseP2PPort)+maxAttempts)
+ if err != nil {
+ return 0, 0, fmt.Errorf("could not find available P2P port: %w", err)
+ }
+ p2pPort = uint(p2pPorts[0])
+
+ rpcPorts, err := findConsecutivePorts(int(baseRPCPort), 1, int(baseRPCPort)+maxAttempts)
+ if err != nil {
+ return 0, 0, fmt.Errorf("could not find available RPC port: %w", err)
+ }
+ rpcPort = uint(rpcPorts[0])
+
+ return p2pPort, rpcPort, nil
+}
+
+// GetBesuNodeDefaults returns the default configuration for besuNodes
+// Besu nodes, assigning each node its own free P2P, RPC and metrics
+// port (each node gets a 100-port offset from the base ports, with a
+// fallback to a higher range when the preferred ports are taken).
+// A non-positive besuNodes is silently coerced to 1; more than 15
+// nodes is rejected with an error.
+func (s *NodeService) GetBesuNodeDefaults(besuNodes int) ([]BesuNodeDefaults, error) {
+ // Validate node count
+ if besuNodes <= 0 {
+ besuNodes = 1
+ }
+ if besuNodes > 15 {
+ return nil, fmt.Errorf("besu node count exceeds maximum supported nodes (15)")
+ }
+
+ // Get external IP for p2p communication
+ externalIP, err := s.GetExternalIP()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get external IP: %w", err)
+ }
+
+ // Use localhost for internal IP
+ internalIP := "127.0.0.1"
+
+ // Base ports for Besu nodes with sufficient spacing
+ const (
+ baseP2PPort = 30303 // Starting P2P port
+ baseRPCPort = 8545 // Starting RPC port
+ baseMetricsPort = 9545 // Starting metrics port
+ portOffset = 100 // Each node gets a 100 port range
+ )
+
+ // Create array to hold all node defaults
+ nodeDefaults := make([]BesuNodeDefaults, besuNodes)
+
+ // Generate defaults for each node
+ for i := 0; i < besuNodes; i++ {
+ // Try to get ports for each node
+ p2pPort, rpcPort, err := GetBesuPorts(
+ uint(baseP2PPort+(i*portOffset)),
+ uint(baseRPCPort+(i*portOffset)),
+ )
+ if err != nil {
+ // If we can't get the preferred ports, try from a higher range
+ p2pPort, rpcPort, err = GetBesuPorts(
+ uint(40303+(i*portOffset)),
+ uint(18545+(i*portOffset)),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to find available ports for node %d: %w", i+1, err)
+ }
+ }
+
+ // Find available metrics port
+ metricsPorts, err := findConsecutivePorts(int(baseMetricsPort+(i*portOffset)), 1, int(baseMetricsPort+(i*portOffset))+100)
+ if err != nil {
+ // If we can't get the preferred metrics port, try from a higher range
+ metricsPorts, err = findConsecutivePorts(int(19545+(i*portOffset)), 1, int(19545+(i*portOffset))+100)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find available metrics port for node %d: %w", i+1, err)
+ }
+ }
+
+ // Create node defaults with unique ports
+ nodeDefaults[i] = BesuNodeDefaults{
+ P2PHost: externalIP, // Use external IP for p2p host
+ P2PPort: p2pPort,
+ RPCHost: "0.0.0.0", // Allow RPC from any interface
+ RPCPort: rpcPort,
+ ExternalIP: externalIP,
+ InternalIP: internalIP,
+ Mode: ModeService,
+ Env: map[string]string{
+ "JAVA_OPTS": "-Xmx4g",
+ },
+ // Set metrics configuration
+ MetricsEnabled: true,
+ MetricsHost: "0.0.0.0", // Allow metrics from any interface
+ MetricsPort: uint(metricsPorts[0]),
+ MetricsProtocol: "PROMETHEUS",
+ }
+ }
+
+ return nodeDefaults, nil
+}
+
+// getBesuFromConfig builds a besu.LocalBesu handle for an existing node
+// from its stored node and deployment configs: it loads the node's
+// network (for the genesis block and network config), resolves the
+// node key and its decrypted private key, and wires everything into
+// StartBesuOpts. Unlike startBesuNode, this path uses config.Version
+// and does not set ChainID — presumably fine for stop/cleanup callers;
+// TODO confirm.
+func (s *NodeService) getBesuFromConfig(ctx context.Context, dbNode *db.Node, config *types.BesuNodeConfig, deployConfig *types.BesuNodeDeploymentConfig) (*besu.LocalBesu, error) {
+ network, err := s.db.GetNetwork(ctx, deployConfig.NetworkID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get network: %w", err)
+ }
+ key, err := s.keymanagementService.GetKey(ctx, int(config.KeyID))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get key: %w", err)
+ }
+ privateKeyDecrypted, err := s.keymanagementService.GetDecryptedPrivateKey(int(config.KeyID))
+ if err != nil {
+ return nil, fmt.Errorf("failed to decrypt key: %w", err)
+ }
+ var networkConfig networktypes.BesuNetworkConfig
+ if err := json.Unmarshal([]byte(network.Config.String), &networkConfig); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal network config: %w", err)
+ }
+
+ localBesu := besu.NewLocalBesu(
+ besu.StartBesuOpts{
+ ID: dbNode.Slug,
+ GenesisFile: network.GenesisBlockB64.String,
+ NetworkID: deployConfig.NetworkID,
+ P2PPort: fmt.Sprintf("%d", deployConfig.P2PPort),
+ RPCPort: fmt.Sprintf("%d", deployConfig.RPCPort),
+ ListenAddress: deployConfig.P2PHost,
+ MinerAddress: key.EthereumAddress,
+ ConsensusType: "qbft", // TODO: get consensus type from network
+ BootNodes: config.BootNodes,
+ Version: config.Version,
+ NodePrivateKey: strings.TrimPrefix(privateKeyDecrypted, "0x"),
+ Env: config.Env,
+ P2PHost: config.P2PHost,
+ RPCHost: config.RPCHost,
+ MetricsEnabled: config.MetricsEnabled,
+ MetricsPort: config.MetricsPort,
+ MetricsProtocol: config.MetricsProtocol,
+ },
+ string(config.Mode),
+ dbNode.ID,
+ s.logger,
+ s.configService,
+ s.settingsService,
+ networkConfig,
+ )
+
+ return localBesu, nil
+}
+
+// stopBesuNode stops a Besu node. It deserializes the node's stored
+// node and deployment configs, reconstructs a LocalBesu handle via
+// getBesuFromConfig, and calls Stop on it. Any failure at any step is
+// returned wrapped; the node's database state is not modified here.
+func (s *NodeService) stopBesuNode(ctx context.Context, dbNode *db.Node) error {
+ // Load node configuration
+ nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+ if err != nil {
+ return fmt.Errorf("failed to deserialize node config: %w", err)
+ }
+ besuNodeConfig, ok := nodeConfig.(*types.BesuNodeConfig)
+ if !ok {
+ return fmt.Errorf("failed to assert node config to BesuNodeConfig")
+ }
+
+ // Load deployment configuration
+ deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
+ if err != nil {
+ return fmt.Errorf("failed to deserialize deployment config: %w", err)
+ }
+ besuDeployConfig, ok := deploymentConfig.(*types.BesuNodeDeploymentConfig)
+ if !ok {
+ return fmt.Errorf("failed to assert deployment config to BesuNodeDeploymentConfig")
+ }
+
+ // Get Besu instance
+ localBesu, err := s.getBesuFromConfig(ctx, dbNode, besuNodeConfig, besuDeployConfig)
+ if err != nil {
+ return fmt.Errorf("failed to get besu instance: %w", err)
+ }
+
+ // Stop the node
+ err = localBesu.Stop()
+ if err != nil {
+ return fmt.Errorf("failed to stop besu node: %w", err)
+ }
+
+ return nil
+}
+
+// startBesuNode starts a Besu node. It deserializes the node's stored
+// node and deployment configs, loads the network (genesis block +
+// network config), resolves the node key and decrypted private key,
+// builds a LocalBesu and calls Start. On success a summary line is
+// logged. Note: this builds StartBesuOpts inline rather than through
+// getBesuFromConfig — it additionally sets ChainID and hard-codes the
+// Besu version and metrics protocol (see TODOs below).
+func (s *NodeService) startBesuNode(ctx context.Context, dbNode *db.Node) error {
+ // Load node configuration
+ nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+ if err != nil {
+ return fmt.Errorf("failed to deserialize node config: %w", err)
+ }
+ besuNodeConfig, ok := nodeConfig.(*types.BesuNodeConfig)
+ if !ok {
+ return fmt.Errorf("failed to assert node config to BesuNodeConfig")
+ }
+
+ // Load deployment configuration
+ deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
+ if err != nil {
+ return fmt.Errorf("failed to deserialize deployment config: %w", err)
+ }
+ besuDeployConfig, ok := deploymentConfig.(*types.BesuNodeDeploymentConfig)
+ if !ok {
+ return fmt.Errorf("failed to assert deployment config to BesuNodeDeploymentConfig")
+ }
+
+ // Get key for node
+ key, err := s.keymanagementService.GetKey(ctx, int(besuNodeConfig.KeyID))
+ if err != nil {
+ return fmt.Errorf("failed to get key: %w", err)
+ }
+ network, err := s.db.GetNetwork(ctx, besuDeployConfig.NetworkID)
+ if err != nil {
+ return fmt.Errorf("failed to get network: %w", err)
+ }
+ privateKeyDecrypted, err := s.keymanagementService.GetDecryptedPrivateKey(int(besuNodeConfig.KeyID))
+ if err != nil {
+ return fmt.Errorf("failed to decrypt key: %w", err)
+ }
+ var networkConfig networktypes.BesuNetworkConfig
+ if err := json.Unmarshal([]byte(network.Config.String), &networkConfig); err != nil {
+ return fmt.Errorf("failed to unmarshal network config: %w", err)
+ }
+
+ // Create LocalBesu instance
+ localBesu := besu.NewLocalBesu(
+ besu.StartBesuOpts{
+ ID: dbNode.Slug,
+ GenesisFile: network.GenesisBlockB64.String,
+ NetworkID: besuDeployConfig.NetworkID,
+ ChainID: networkConfig.ChainID,
+ P2PPort: fmt.Sprintf("%d", besuDeployConfig.P2PPort),
+ RPCPort: fmt.Sprintf("%d", besuDeployConfig.RPCPort),
+ ListenAddress: besuDeployConfig.P2PHost,
+ MinerAddress: key.EthereumAddress,
+ ConsensusType: "qbft", // TODO: get consensus type from network
+ BootNodes: besuNodeConfig.BootNodes,
+ Version: "25.4.1", // TODO: get version from network
+ NodePrivateKey: strings.TrimPrefix(privateKeyDecrypted, "0x"),
+ Env: besuNodeConfig.Env,
+ P2PHost: besuNodeConfig.P2PHost,
+ RPCHost: besuNodeConfig.RPCHost,
+ MetricsEnabled: besuDeployConfig.MetricsEnabled,
+ MetricsPort: besuDeployConfig.MetricsPort,
+ MetricsProtocol: "PROMETHEUS",
+ },
+ string(besuNodeConfig.Mode),
+ dbNode.ID,
+ s.logger,
+ s.configService,
+ s.settingsService,
+ networkConfig,
+ )
+
+ // Start the node
+ _, err = localBesu.Start()
+ if err != nil {
+ return fmt.Errorf("failed to start besu node: %w", err)
+ }
+
+ s.logger.Info("Started Besu node",
+ "nodeID", dbNode.ID,
+ "name", dbNode.Name,
+ "networkID", besuDeployConfig.NetworkID,
+ )
+
+ return nil
+}
+
+// UpdateBesuNodeRequest contains the options for updating a Besu node.
+// Required fields are enforced by the validate tags; the remaining
+// fields are optional and only applied when non-empty/non-nil (see
+// UpdateBesuNode).
+type UpdateBesuNodeRequest struct {
+ NetworkID uint `json:"networkId" validate:"required"`
+ P2PHost string `json:"p2pHost" validate:"required"`
+ P2PPort uint `json:"p2pPort" validate:"required"`
+ RPCHost string `json:"rpcHost" validate:"required"`
+ RPCPort uint `json:"rpcPort" validate:"required"`
+ Bootnodes []string `json:"bootnodes,omitempty"`
+ ExternalIP string `json:"externalIp,omitempty"`
+ InternalIP string `json:"internalIp,omitempty"`
+ Env map[string]string `json:"env,omitempty"`
+ // Metrics configuration
+ MetricsEnabled bool `json:"metricsEnabled"`
+ MetricsPort int64 `json:"metricsPort"`
+}
+
+// UpdateBesuNode updates an existing Besu node configuration.
+// It loads and type-checks the stored node/deployment configs, applies
+// the request fields to both (optional fields only when set), rebuilds
+// the enode URL from the node's public key, and persists both configs
+// back to the database. Returns the updated node as a NodeResponse,
+// a not-found error if the node does not exist, or a validation error
+// if the node is not a Besu fullnode. The running process is NOT
+// restarted here; changes take effect on the next start.
+func (s *NodeService) UpdateBesuNode(ctx context.Context, nodeID int64, req UpdateBesuNodeRequest) (*NodeResponse, error) {
+ // Get existing node
+ node, err := s.db.GetNode(ctx, nodeID)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return nil, errors.NewNotFoundError("node not found", nil)
+ }
+ return nil, fmt.Errorf("failed to get node: %w", err)
+ }
+
+ // Verify node type
+ if types.NodeType(node.NodeType.String) != types.NodeTypeBesuFullnode {
+ return nil, errors.NewValidationError("node is not a Besu node", nil)
+ }
+
+ // Load current config
+ nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
+ if err != nil {
+ return nil, fmt.Errorf("failed to load besu config: %w", err)
+ }
+
+ besuConfig, ok := nodeConfig.(*types.BesuNodeConfig)
+ if !ok {
+ return nil, fmt.Errorf("invalid besu config type")
+ }
+
+ // Load deployment config (falls back to a zero-value config when the
+ // node has none stored yet)
+ deployBesuConfig := &types.BesuNodeDeploymentConfig{}
+ if node.DeploymentConfig.Valid {
+ deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
+ if err != nil {
+ return nil, fmt.Errorf("failed to deserialize deployment config: %w", err)
+ }
+ var ok bool
+ deployBesuConfig, ok = deploymentConfig.(*types.BesuNodeDeploymentConfig)
+ if !ok {
+ return nil, fmt.Errorf("invalid besu deployment config type")
+ }
+ }
+
+ // Update configuration fields (mirrored into both configs)
+ besuConfig.NetworkID = int64(req.NetworkID)
+ besuConfig.P2PPort = req.P2PPort
+ besuConfig.RPCPort = req.RPCPort
+ besuConfig.P2PHost = req.P2PHost
+ besuConfig.RPCHost = req.RPCHost
+ deployBesuConfig.NetworkID = int64(req.NetworkID)
+ deployBesuConfig.P2PPort = req.P2PPort
+ deployBesuConfig.RPCPort = req.RPCPort
+ deployBesuConfig.P2PHost = req.P2PHost
+ deployBesuConfig.RPCHost = req.RPCHost
+ if req.Bootnodes != nil {
+ besuConfig.BootNodes = req.Bootnodes
+ }
+
+ if req.ExternalIP != "" {
+ besuConfig.ExternalIP = req.ExternalIP
+ deployBesuConfig.ExternalIP = req.ExternalIP
+ }
+ if req.InternalIP != "" {
+ besuConfig.InternalIP = req.InternalIP
+ deployBesuConfig.InternalIP = req.InternalIP
+ }
+
+ // Update metrics configuration
+ besuConfig.MetricsEnabled = req.MetricsEnabled
+ besuConfig.MetricsPort = req.MetricsPort
+ deployBesuConfig.MetricsEnabled = req.MetricsEnabled
+ deployBesuConfig.MetricsPort = req.MetricsPort
+
+ // Update environment variables
+ if req.Env != nil {
+ besuConfig.Env = req.Env
+ deployBesuConfig.Env = req.Env
+ }
+
+ // Get the key to update the enodeURL
+ key, err := s.keymanagementService.GetKey(ctx, int(besuConfig.KeyID))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get key: %w", err)
+ }
+
+ // Update enodeURL based on the public key, external IP and P2P port
+ // NOTE(review): the [2:] slice assumes PublicKey carries a 2-char
+ // ("0x"-style) prefix and panics if it is a 1-char non-empty string —
+ // confirm the key service always returns prefixed keys.
+ if key.PublicKey != "" {
+ publicKey := key.PublicKey[2:]
+ deployBesuConfig.EnodeURL = fmt.Sprintf("enode://%s@%s:%d", publicKey, besuConfig.ExternalIP, besuConfig.P2PPort)
+ }
+
+ // Store updated node config
+ configBytes, err := utils.StoreNodeConfig(besuConfig)
+ if err != nil {
+ return nil, fmt.Errorf("failed to store node config: %w", err)
+ }
+
+ node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{
+ ID: nodeID,
+ NodeConfig: sql.NullString{
+ String: string(configBytes),
+ Valid: true,
+ },
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to update node config: %w", err)
+ }
+
+ // Update deployment config
+ deploymentConfigBytes, err := json.Marshal(deployBesuConfig)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal deployment config: %w", err)
+ }
+
+ node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{
+ ID: nodeID,
+ DeploymentConfig: sql.NullString{
+ String: string(deploymentConfigBytes),
+ Valid: true,
+ },
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to update deployment config: %w", err)
+ }
+
+ // Return updated node
+ _, nodeResponse := s.mapDBNodeToServiceNode(node)
+ return nodeResponse, nil
+}
+
+// validateBesuConfig validates the Besu node configuration.
+// It checks that all required fields are set (non-zero ports and
+// network ID, non-empty hosts and IPs) and returns a descriptive error
+// for the first missing field, or nil when the config is complete.
+func (s *NodeService) validateBesuConfig(config *types.BesuNodeConfig) error {
+
+ if config.P2PPort == 0 {
+ return fmt.Errorf("p2p port is required")
+ }
+ if config.RPCPort == 0 {
+ return fmt.Errorf("rpc port is required")
+ }
+ if config.NetworkID == 0 {
+ return fmt.Errorf("network ID is required")
+ }
+ if config.P2PHost == "" {
+ return fmt.Errorf("p2p host is required")
+ }
+ if config.RPCHost == "" {
+ return fmt.Errorf("rpc host is required")
+ }
+ if config.ExternalIP == "" {
+ return fmt.Errorf("external IP is required")
+ }
+ if config.InternalIP == "" {
+ return fmt.Errorf("internal IP is required")
+ }
+
+ return nil
+}
+
+// cleanupBesuResources cleans up resources specific to a Besu node.
+//
+// Cleanup is best-effort by design: config-loading, type-assertion, and
+// service-stop failures are logged as warnings and the remaining steps still
+// run, so a partially-broken node can still have its directories removed.
+// The only hard failure is resolving the user home directory on darwin.
+func (s *NodeService) cleanupBesuResources(ctx context.Context, node *db.Node) error {
+
+	// Load node configuration
+	nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
+	if err != nil {
+		s.logger.Warn("Failed to load node config during cleanup", "error", err)
+		// Continue with cleanup even if config loading fails
+	}
+
+	// Load deployment configuration
+	deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
+	if err != nil {
+		s.logger.Warn("Failed to load deployment config during cleanup", "error", err)
+		// Continue with cleanup even if config loading fails
+	}
+
+	// Create Besu instance for cleanup. localBesu stays nil unless both
+	// configs loaded AND both type assertions succeeded AND the instance
+	// could be constructed; the stop and service-file steps below are
+	// skipped when it is nil.
+	var localBesu *besu.LocalBesu
+	if nodeConfig != nil && deploymentConfig != nil {
+		besuNodeConfig, ok := nodeConfig.(*types.BesuNodeConfig)
+		if !ok {
+			s.logger.Warn("Invalid node config type during cleanup")
+		}
+		besuDeployConfig, ok := deploymentConfig.(*types.BesuNodeDeploymentConfig)
+		if !ok {
+			s.logger.Warn("Invalid deployment config type during cleanup")
+		}
+		if besuNodeConfig != nil && besuDeployConfig != nil {
+			localBesu, err = s.getBesuFromConfig(ctx, node, besuNodeConfig, besuDeployConfig)
+			if err != nil {
+				s.logger.Warn("Failed to create Besu instance during cleanup", "error", err)
+			}
+		}
+	}
+
+	// Stop the service if it's running and we have a valid Besu instance
+	if localBesu != nil {
+		if err := localBesu.Stop(); err != nil {
+			s.logger.Warn("Failed to stop Besu service during cleanup", "error", err)
+			// Continue with cleanup even if stop fails
+		}
+	}
+
+	// Clean up Besu-specific directories. All three historical layouts are
+	// attempted; os.RemoveAll on a missing path is not treated as an error.
+	dirsToClean := []string{
+		filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug),
+		filepath.Join(s.configService.GetDataPath(), "besu", node.Slug),
+		filepath.Join(s.configService.GetDataPath(), "besu", "nodes", node.Slug),
+	}
+
+	for _, dir := range dirsToClean {
+		if err := os.RemoveAll(dir); err != nil {
+			if !os.IsNotExist(err) {
+				s.logger.Warn("Failed to remove Besu directory",
+					"path", dir,
+					"error", err)
+			}
+		} else {
+			s.logger.Info("Successfully removed Besu directory",
+				"path", dir)
+		}
+	}
+
+	// Clean up service files based on platform. Files are only removed when
+	// a Besu instance was successfully constructed above.
+	switch runtime.GOOS {
+	case "linux":
+		// Remove systemd service file
+		if localBesu != nil {
+			serviceFile := fmt.Sprintf("/etc/systemd/system/besu-%s.service", node.Slug)
+			if err := os.Remove(serviceFile); err != nil {
+				if !os.IsNotExist(err) {
+					s.logger.Warn("Failed to remove systemd service file", "error", err)
+				}
+			}
+		}
+
+	case "darwin":
+		// Remove launchd plist file
+		homeDir, err := os.UserHomeDir()
+		if err != nil {
+			return fmt.Errorf("failed to get home directory: %w", err)
+		}
+		if localBesu != nil {
+			plistFile := filepath.Join(homeDir, "Library/LaunchAgents", fmt.Sprintf("dev.chainlaunch.besu.%s.plist", node.Slug))
+			if err := os.Remove(plistFile); err != nil {
+				if !os.IsNotExist(err) {
+					s.logger.Warn("Failed to remove launchd plist file", "error", err)
+				}
+			}
+		}
+	}
+
+	// Clean up any data directories
+	dataDir := filepath.Join(s.configService.GetDataPath(), "data", "besu", node.Slug)
+	if err := os.RemoveAll(dataDir); err != nil {
+		if !os.IsNotExist(err) {
+			s.logger.Warn("Failed to remove Besu data directory",
+				"path", dataDir,
+				"error", err)
+		}
+	} else {
+		s.logger.Info("Successfully removed Besu data directory",
+			"path", dataDir)
+	}
+
+	return nil
+}
+
+// initializeBesuNode initializes a Besu node: it validates the node's key,
+// derives the enode URL, validates the configured ports, builds the
+// deployment config, and persists the node's (public) endpoints.
+//
+// Returns the deployment config to be stored for the node, or an error if
+// the key is unusable, a port is invalid, or a DB update fails.
+func (s *NodeService) initializeBesuNode(ctx context.Context, dbNode *db.Node, config *types.BesuNodeConfig) (types.NodeDeploymentConfig, error) {
+	// Validate key exists
+	key, err := s.keymanagementService.GetKey(ctx, int(config.KeyID))
+	if err != nil {
+		return nil, fmt.Errorf("failed to get key: %w", err)
+	}
+	if key.EthereumAddress == "" {
+		return nil, fmt.Errorf("key %d has no ethereum address", config.KeyID)
+	}
+	// Guard the slice below: key.PublicKey[2:] would panic on a public key
+	// shorter than two characters (e.g. empty or corrupted key material).
+	if len(key.PublicKey) < 2 {
+		return nil, fmt.Errorf("key %d has an invalid public key", config.KeyID)
+	}
+	// The first two characters are stripped (assumed "0x"-style prefix —
+	// matches the enode derivation used when updating Besu nodes).
+	enodeURL := fmt.Sprintf("enode://%s@%s:%d", key.PublicKey[2:], config.ExternalIP, config.P2PPort)
+
+	// Validate ports
+	if err := s.validatePort(config.P2PHost, int(config.P2PPort)); err != nil {
+		return nil, fmt.Errorf("invalid P2P port: %w", err)
+	}
+	if err := s.validatePort(config.RPCHost, int(config.RPCPort)); err != nil {
+		return nil, fmt.Errorf("invalid RPC port: %w", err)
+	}
+
+	// Create deployment config
+	deploymentConfig := &types.BesuNodeDeploymentConfig{
+		BaseDeploymentConfig: types.BaseDeploymentConfig{
+			Type: "besu",
+			Mode: string(config.Mode),
+		},
+		KeyID:          config.KeyID,
+		P2PPort:        config.P2PPort,
+		RPCPort:        config.RPCPort,
+		NetworkID:      config.NetworkID,
+		ExternalIP:     config.ExternalIP,
+		P2PHost:        config.P2PHost,
+		RPCHost:        config.RPCHost,
+		InternalIP:     config.InternalIP,
+		EnodeURL:       enodeURL,
+		MetricsEnabled: config.MetricsEnabled,
+		MetricsPort:    config.MetricsPort,
+	}
+
+	// Update node endpoint (internal P2P endpoint)
+	endpoint := fmt.Sprintf("%s:%d", config.P2PHost, config.P2PPort)
+	_, err = s.db.UpdateNodeEndpoint(ctx, &db.UpdateNodeEndpointParams{
+		ID: dbNode.ID,
+		Endpoint: sql.NullString{
+			String: endpoint,
+			Valid:  true,
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to update node endpoint: %w", err)
+	}
+
+	// Update node public endpoint if external IP is set
+	if config.ExternalIP != "" {
+		publicEndpoint := fmt.Sprintf("%s:%d", config.ExternalIP, config.P2PPort)
+		_, err = s.db.UpdateNodePublicEndpoint(ctx, &db.UpdateNodePublicEndpointParams{
+			ID: dbNode.ID,
+			PublicEndpoint: sql.NullString{
+				String: publicEndpoint,
+				Valid:  true,
+			},
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to update node public endpoint: %w", err)
+		}
+	}
+
+	return deploymentConfig, nil
+}
diff --git a/pkg/nodes/service/config.go b/pkg/nodes/service/config.go
index afebe15..efff1d8 100644
--- a/pkg/nodes/service/config.go
+++ b/pkg/nodes/service/config.go
@@ -110,4 +110,9 @@ type BesuNodeProperties struct {
Mode string `json:"mode"`
Version string `json:"version"`
BootNodes []string `json:"bootNodes"`
+ // Metrics configuration
+ MetricsEnabled bool `json:"metricsEnabled"`
+ MetricsHost string `json:"metricsHost"`
+ MetricsPort uint `json:"metricsPort"`
+ MetricsProtocol string `json:"metricsProtocol"`
}
diff --git a/pkg/nodes/service/defaults.go b/pkg/nodes/service/defaults.go
index 5419a79..4c0f220 100644
--- a/pkg/nodes/service/defaults.go
+++ b/pkg/nodes/service/defaults.go
@@ -33,6 +33,11 @@ type BesuNodeDefaults struct {
InternalIP string `json:"internalIp"`
Mode Mode `json:"mode"`
Env map[string]string `json:"environmentVariables"`
+ // Metrics configuration
+ MetricsEnabled bool `json:"metricsEnabled"`
+ MetricsHost string `json:"metricsHost"`
+ MetricsPort uint `json:"metricsPort"`
+ MetricsProtocol string `json:"metricsProtocol"`
}
// NodesDefaultsParams represents parameters for getting multiple nodes defaults
diff --git a/pkg/nodes/service/events.go b/pkg/nodes/service/events.go
index f0dc0e7..e612cea 100644
--- a/pkg/nodes/service/events.go
+++ b/pkg/nodes/service/events.go
@@ -15,11 +15,13 @@ import (
type NodeEventType string
const (
- NodeEventStarting NodeEventType = "STARTING"
- NodeEventStarted NodeEventType = "STARTED"
- NodeEventStopping NodeEventType = "STOPPING"
- NodeEventStopped NodeEventType = "STOPPED"
- NodeEventError NodeEventType = "ERROR"
+ NodeEventStarting NodeEventType = "STARTING"
+ NodeEventStarted NodeEventType = "STARTED"
+ NodeEventStopping NodeEventType = "STOPPING"
+ NodeEventStopped NodeEventType = "STOPPED"
+ NodeEventError NodeEventType = "ERROR"
+ NodeEventRenewingCertificates NodeEventType = "RENEWING_CERTIFICATES"
+ NodeEventRenewedCertificates NodeEventType = "RENEWED_CERTIFICATES"
)
// NodeEvent represents a node event in the service layer
diff --git a/pkg/nodes/service/fabric.go b/pkg/nodes/service/fabric.go
new file mode 100644
index 0000000..c1af9e1
--- /dev/null
+++ b/pkg/nodes/service/fabric.go
@@ -0,0 +1,1060 @@
+package service
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ "github.com/chainlaunch/chainlaunch/pkg/errors"
+ fabricservice "github.com/chainlaunch/chainlaunch/pkg/fabric/service"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/orderer"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/peer"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/utils"
+)
+
+// GetFabricPeerDefaults returns default values for a Fabric peer node.
+// It tries the conventional peer port range first (7051), retries from a
+// higher range on failure, and finally falls back to static well-known
+// defaults when no free ports can be found at all.
+func (s *NodeService) GetFabricPeerDefaults() *NodeDefaults {
+	listenPort, chaincodePort, eventsPort, opsPort, err := GetPeerPorts(7051)
+	if err != nil {
+		// Preferred range unavailable — retry from a higher range.
+		listenPort, chaincodePort, eventsPort, opsPort, err = GetPeerPorts(10000)
+	}
+	if err != nil {
+		s.logger.Error("Failed to get available ports for peer", "error", err)
+		// Fall back to static default ports when all attempts failed.
+		return &NodeDefaults{
+			ListenAddress:           "0.0.0.0:7051",
+			ExternalEndpoint:        "localhost:7051",
+			ChaincodeAddress:        "0.0.0.0:7052",
+			EventsAddress:           "0.0.0.0:7053",
+			OperationsListenAddress: "0.0.0.0:9443",
+			Mode:                    ModeService,
+			ServiceName:             "fabric-peer",
+			LogPath:                 "/var/log/fabric/peer.log",
+			ErrorLogPath:            "/var/log/fabric/peer.err",
+		}
+	}
+
+	return &NodeDefaults{
+		ListenAddress:           fmt.Sprintf("0.0.0.0:%d", listenPort),
+		ExternalEndpoint:        fmt.Sprintf("localhost:%d", listenPort),
+		ChaincodeAddress:        fmt.Sprintf("0.0.0.0:%d", chaincodePort),
+		EventsAddress:           fmt.Sprintf("0.0.0.0:%d", eventsPort),
+		OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", opsPort),
+		Mode:                    ModeService,
+		ServiceName:             "fabric-peer",
+		LogPath:                 "/var/log/fabric/peer.log",
+		ErrorLogPath:            "/var/log/fabric/peer.err",
+	}
+}
+
+// GetFabricOrdererDefaults returns default values for a Fabric orderer node.
+// It tries the conventional orderer port range first (7050), retries from a
+// higher range on failure, and finally falls back to static well-known
+// defaults when no free ports can be found at all.
+func (s *NodeService) GetFabricOrdererDefaults() *NodeDefaults {
+	listenPort, adminPort, opsPort, err := GetOrdererPorts(7050)
+	if err != nil {
+		// Preferred range unavailable — retry from a higher range.
+		listenPort, adminPort, opsPort, err = GetOrdererPorts(10000)
+	}
+	if err != nil {
+		s.logger.Error("Failed to get available ports for orderer", "error", err)
+		// Fall back to static default ports when all attempts failed.
+		return &NodeDefaults{
+			ListenAddress:           "0.0.0.0:7050",
+			ExternalEndpoint:        "localhost:7050",
+			AdminAddress:            "0.0.0.0:7053",
+			OperationsListenAddress: "0.0.0.0:8443",
+			Mode:                    ModeService,
+			ServiceName:             "fabric-orderer",
+			LogPath:                 "/var/log/fabric/orderer.log",
+			ErrorLogPath:            "/var/log/fabric/orderer.err",
+		}
+	}
+
+	return &NodeDefaults{
+		ListenAddress:           fmt.Sprintf("0.0.0.0:%d", listenPort),
+		ExternalEndpoint:        fmt.Sprintf("localhost:%d", listenPort),
+		AdminAddress:            fmt.Sprintf("0.0.0.0:%d", adminPort),
+		OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", opsPort),
+		Mode:                    ModeService,
+		ServiceName:             "fabric-orderer",
+		LogPath:                 "/var/log/fabric/orderer.log",
+		ErrorLogPath:            "/var/log/fabric/orderer.err",
+	}
+}
+
+// Port allocation constants for Fabric nodes. The peer and orderer base
+// ranges are separated by 2000 ports so their per-node 100-port windows
+// cannot collide within each type's supported maximum of 15 nodes.
+const (
+	// Base ports for peers and orderers with sufficient spacing
+	peerBasePort    = 7000 // Starting port for peers
+	ordererBasePort = 9000 // Starting port for orderers with 2000 port gap
+
+	// Port offsets to ensure no overlap within node types
+	peerPortOffset    = 100 // Each peer gets a 100 port range
+	ordererPortOffset = 100 // Each orderer gets a 100 port range
+
+	maxPortAttempts = 100 // Maximum attempts to find available ports — presumably consumed by the port scanners; not referenced in this file's visible code, confirm usage
+)
+
+// GetFabricNodesDefaults returns default values for multiple nodes with
+// guaranteed non-overlapping ports.
+//
+// Peers are allocated from peerBasePort upward and orderers from
+// ordererBasePort upward, each node receiving its own 100-port window.
+// At most 15 nodes of each type are supported.
+func (s *NodeService) GetFabricNodesDefaults(params NodesDefaultsParams) (*NodesDefaultsResult, error) {
+	// Validate node counts
+	if params.PeerCount > 15 {
+		return nil, fmt.Errorf("peer count exceeds maximum supported nodes (15)")
+	}
+	if params.OrdererCount > 15 {
+		return nil, fmt.Errorf("orderer count exceeds maximum supported nodes (15)")
+	}
+
+	result := &NodesDefaultsResult{
+		Peers:              make([]NodeDefaults, params.PeerCount),
+		Orderers:           make([]NodeDefaults, params.OrdererCount),
+		AvailableAddresses: []string{"localhost", "0.0.0.0"},
+	}
+
+	// Generate peer defaults with incremental ports
+	// Each peer needs 4 ports (listen, chaincode, events, operations)
+	for i := 0; i < params.PeerCount; i++ {
+		basePort := peerBasePort + (i * peerPortOffset)
+		listen, chaincode, events, operations, err := GetPeerPorts(basePort)
+		if err != nil {
+			// Try with a higher range if initial attempt fails.
+			// NOTE(review): this fallback range starts at 10000, above
+			// ordererBasePort (9000), so any ports found here will always
+			// fail the overlap check below and return an error — confirm
+			// whether the fallback or the check needs adjusting.
+			listen, chaincode, events, operations, err = GetPeerPorts(10000 + (i * peerPortOffset))
+			if err != nil {
+				return nil, fmt.Errorf("failed to get peer ports: %w", err)
+			}
+		}
+
+		// Validate that ports don't overlap with orderer range
+		if listen >= ordererBasePort || chaincode >= ordererBasePort ||
+			events >= ordererBasePort || operations >= ordererBasePort {
+			return nil, fmt.Errorf("peer ports would overlap with orderer port range")
+		}
+
+		result.Peers[i] = NodeDefaults{
+			ListenAddress:           fmt.Sprintf("0.0.0.0:%d", listen),
+			ExternalEndpoint:        fmt.Sprintf("localhost:%d", listen),
+			ChaincodeAddress:        fmt.Sprintf("0.0.0.0:%d", chaincode),
+			EventsAddress:           fmt.Sprintf("0.0.0.0:%d", events),
+			OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operations),
+			Mode:                    params.Mode,
+			ServiceName:             fmt.Sprintf("fabric-peer-%d", i+1),
+			LogPath:                 fmt.Sprintf("/var/log/fabric/peer%d.log", i+1),
+			ErrorLogPath:            fmt.Sprintf("/var/log/fabric/peer%d.err", i+1),
+		}
+	}
+
+	// Generate orderer defaults with incremental ports
+	// Each orderer needs 3 ports (listen, admin, operations)
+	for i := 0; i < params.OrdererCount; i++ {
+		basePort := ordererBasePort + (i * ordererPortOffset)
+		listen, admin, operations, err := GetOrdererPorts(basePort)
+		if err != nil {
+			// Try with a higher range if initial attempt fails (11000+ clears
+			// the maximum peer port of 8500, so the check below can pass).
+			listen, admin, operations, err = GetOrdererPorts(11000 + (i * ordererPortOffset))
+			if err != nil {
+				return nil, fmt.Errorf("failed to get orderer ports: %w", err)
+			}
+		}
+
+		// Validate that ports don't overlap with peer range
+		maxPeerPort := peerBasePort + (15 * peerPortOffset) // Account for maximum possible peers
+		if listen <= maxPeerPort ||
+			admin <= maxPeerPort ||
+			operations <= maxPeerPort {
+			return nil, fmt.Errorf("orderer ports would overlap with peer port range")
+		}
+
+		result.Orderers[i] = NodeDefaults{
+			ListenAddress:           fmt.Sprintf("0.0.0.0:%d", listen),
+			ExternalEndpoint:        fmt.Sprintf("localhost:%d", listen),
+			AdminAddress:            fmt.Sprintf("0.0.0.0:%d", admin),
+			OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operations),
+			Mode:                    params.Mode,
+			ServiceName:             fmt.Sprintf("fabric-orderer-%d", i+1),
+			LogPath:                 fmt.Sprintf("/var/log/fabric/orderer%d.log", i+1),
+			ErrorLogPath:            fmt.Sprintf("/var/log/fabric/orderer%d.err", i+1),
+		}
+	}
+
+	return result, nil
+}
+
+// GetFabricPeer gets a Fabric peer node configuration and returns a
+// LocalPeer handle built from the stored node config and its organization.
+//
+// Returns an error when the node does not exist, is not a Fabric peer, or
+// its config/organization cannot be loaded. Deployment-config failures are
+// only logged: the peer is still returned using the base node config.
+func (s *NodeService) GetFabricPeer(ctx context.Context, id int64) (*peer.LocalPeer, error) {
+	// Get the node from database
+	node, err := s.db.GetNode(ctx, id)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, fmt.Errorf("peer node not found: %w", err)
+		}
+		return nil, fmt.Errorf("failed to get peer node: %w", err)
+	}
+
+	// Verify node type
+	if types.NodeType(node.NodeType.String) != types.NodeTypeFabricPeer {
+		return nil, fmt.Errorf("node %d is not a Fabric peer", id)
+	}
+
+	// Load node config
+	nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
+	if err != nil {
+		return nil, fmt.Errorf("failed to load peer config: %w", err)
+	}
+
+	// Type assert to FabricPeerConfig
+	peerConfig, ok := nodeConfig.(*types.FabricPeerConfig)
+	if !ok {
+		return nil, fmt.Errorf("invalid peer config type")
+	}
+
+	// Get deployment config if available; it overrides the external endpoint
+	// on the in-memory config (best-effort — a deserialization failure only
+	// produces a warning).
+	if node.DeploymentConfig.Valid {
+		deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
+		if err != nil {
+			s.logger.Warn("Failed to deserialize deployment config", "error", err)
+		} else {
+			// Update config with deployment values
+			if deployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig); ok {
+				peerConfig.ExternalEndpoint = deployConfig.ExternalEndpoint
+				// Add any other deployment-specific fields that should be included
+			}
+		}
+	}
+
+	// Get organization
+	org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get organization: %w", err)
+	}
+
+	// Create and return local peer
+	localPeer := s.getPeerFromConfig(node, org, peerConfig)
+	return localPeer, nil
+}
+
+// GetFabricOrderer gets a Fabric orderer node configuration and returns a
+// LocalOrderer handle built from the stored node config and its organization.
+//
+// Returns an error when the node does not exist, is not a Fabric orderer, or
+// its config/organization cannot be loaded. Deployment-config failures are
+// only logged: the orderer is still returned using the base node config.
+func (s *NodeService) GetFabricOrderer(ctx context.Context, id int64) (*orderer.LocalOrderer, error) {
+	// Get the node from database
+	node, err := s.db.GetNode(ctx, id)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, fmt.Errorf("orderer node not found: %w", err)
+		}
+		return nil, fmt.Errorf("failed to get orderer node: %w", err)
+	}
+
+	// Verify node type
+	if types.NodeType(node.NodeType.String) != types.NodeTypeFabricOrderer {
+		return nil, fmt.Errorf("node %d is not a Fabric orderer", id)
+	}
+
+	// Load node config
+	nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
+	if err != nil {
+		return nil, fmt.Errorf("failed to load orderer config: %w", err)
+	}
+
+	// Type assert to FabricOrdererConfig
+	ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig)
+	if !ok {
+		return nil, fmt.Errorf("invalid orderer config type")
+	}
+
+	// Get deployment config if available; it overrides the external endpoint
+	// on the in-memory config (best-effort — a deserialization failure only
+	// produces a warning).
+	if node.DeploymentConfig.Valid {
+		deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
+		if err != nil {
+			s.logger.Warn("Failed to deserialize deployment config", "error", err)
+		} else {
+			// Update config with deployment values
+			if deployConfig, ok := deploymentConfig.(*types.FabricOrdererDeploymentConfig); ok {
+				ordererConfig.ExternalEndpoint = deployConfig.ExternalEndpoint
+				// Add any other deployment-specific fields that should be included
+			}
+		}
+	}
+
+	// Get organization
+	org, err := s.orgService.GetOrganization(ctx, ordererConfig.OrganizationID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get organization: %w", err)
+	}
+
+	// Create and return local orderer
+	localOrderer := s.getOrdererFromConfig(node, org, ordererConfig)
+	return localOrderer, nil
+}
+
+// GetFabricNodesByOrganization gets all Fabric nodes (peers and orderers)
+// belonging to the given organization by filtering the full node list.
+func (s *NodeService) GetFabricNodesByOrganization(ctx context.Context, orgID int64) ([]NodeResponse, error) {
+	all, err := s.GetAllNodes(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get nodes: %w", err)
+	}
+
+	// Keep only peers/orderers whose config carries the requested org ID.
+	var matched []NodeResponse
+	for _, candidate := range all.Items {
+		switch candidate.NodeType {
+		case types.NodeTypeFabricPeer:
+			if candidate.FabricPeer != nil && candidate.FabricPeer.OrganizationID == orgID {
+				matched = append(matched, candidate)
+			}
+		case types.NodeTypeFabricOrderer:
+			if candidate.FabricOrderer != nil && candidate.FabricOrderer.OrganizationID == orgID {
+				matched = append(matched, candidate)
+			}
+		}
+	}
+
+	return matched, nil
+}
+
+// startFabricPeer starts a Fabric peer node.
+//
+// The organization is resolved from the deployment config's OrganizationID,
+// while the peer instance itself is constructed from the node config
+// (peerNodeConfig). NOTE(review): presumably both configs carry the same
+// organization ID — confirm, since a mismatch would start the peer under
+// the wrong org's credentials.
+func (s *NodeService) startFabricPeer(ctx context.Context, dbNode *db.Node) error {
+
+	nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+	if err != nil {
+		return fmt.Errorf("failed to deserialize node config: %w", err)
+	}
+	peerNodeConfig, ok := nodeConfig.(*types.FabricPeerConfig)
+	if !ok {
+		return fmt.Errorf("failed to assert node config to FabricPeerConfig")
+	}
+
+	deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
+	if err != nil {
+		return fmt.Errorf("failed to deserialize deployment config: %w", err)
+	}
+	s.logger.Info("Starting fabric peer", "deploymentConfig", deploymentConfig)
+
+	// Deployment view of the peer config — used only to look up the org.
+	peerConfig := deploymentConfig.ToFabricPeerConfig()
+
+	org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID)
+	if err != nil {
+		return fmt.Errorf("failed to get organization: %w", err)
+	}
+
+	localPeer := s.getPeerFromConfig(dbNode, org, peerNodeConfig)
+
+	_, err = localPeer.Start()
+	if err != nil {
+		return fmt.Errorf("failed to start peer: %w", err)
+	}
+
+	return nil
+}
+
+// stopFabricPeer stops a Fabric peer node.
+//
+// The peer instance is reconstructed from the stored node config and its
+// organization, then asked to stop.
+func (s *NodeService) stopFabricPeer(ctx context.Context, dbNode *db.Node) error {
+	// Fix: deserialize the deployment config from DeploymentConfig, not
+	// NodeConfig — the previous code passed dbNode.NodeConfig.String here,
+	// which is the wrong document (compare startFabricPeer and
+	// SynchronizePeerConfig, which both read dbNode.DeploymentConfig.String).
+	deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
+	if err != nil {
+		return fmt.Errorf("failed to deserialize deployment config: %w", err)
+	}
+	nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+	if err != nil {
+		return fmt.Errorf("failed to deserialize node config: %w", err)
+	}
+	peerNodeConfig, ok := nodeConfig.(*types.FabricPeerConfig)
+	if !ok {
+		return fmt.Errorf("failed to assert node config to FabricPeerConfig")
+	}
+	s.logger.Debug("peerNodeConfig", "peerNodeConfig", peerNodeConfig)
+	// The deployment view is currently only used for diagnostics.
+	peerConfig := deploymentConfig.ToFabricPeerConfig()
+	s.logger.Debug("peerConfig", "peerConfig", peerConfig)
+	org, err := s.orgService.GetOrganization(ctx, peerNodeConfig.OrganizationID)
+	if err != nil {
+		return fmt.Errorf("failed to get organization: %w", err)
+	}
+
+	localPeer := s.getPeerFromConfig(dbNode, org, peerNodeConfig)
+
+	err = localPeer.Stop()
+	if err != nil {
+		return fmt.Errorf("failed to stop peer: %w", err)
+	}
+
+	return nil
+}
+
+// startFabricOrderer starts a Fabric orderer node by rebuilding the local
+// orderer handle from the stored node config and its organization.
+func (s *NodeService) startFabricOrderer(ctx context.Context, dbNode *db.Node) error {
+	cfg, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+	if err != nil {
+		return fmt.Errorf("failed to deserialize node config: %w", err)
+	}
+	ordererCfg, ok := cfg.(*types.FabricOrdererConfig)
+	if !ok {
+		return fmt.Errorf("failed to assert node config to FabricOrdererConfig")
+	}
+
+	org, err := s.orgService.GetOrganization(ctx, ordererCfg.OrganizationID)
+	if err != nil {
+		return fmt.Errorf("failed to get organization: %w", err)
+	}
+
+	// Build the local orderer handle and start it; Start's first return
+	// value is not needed here.
+	if _, err := s.getOrdererFromConfig(dbNode, org, ordererCfg).Start(); err != nil {
+		return fmt.Errorf("failed to start orderer: %w", err)
+	}
+
+	return nil
+}
+
+// stopFabricOrderer stops a Fabric orderer node by rebuilding the local
+// orderer handle from the stored node config and its organization.
+func (s *NodeService) stopFabricOrderer(ctx context.Context, dbNode *db.Node) error {
+	cfg, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+	if err != nil {
+		return fmt.Errorf("failed to deserialize node config: %w", err)
+	}
+	ordererCfg, ok := cfg.(*types.FabricOrdererConfig)
+	if !ok {
+		return fmt.Errorf("failed to assert node config to FabricOrdererConfig")
+	}
+
+	org, err := s.orgService.GetOrganization(ctx, ordererCfg.OrganizationID)
+	if err != nil {
+		return fmt.Errorf("failed to get organization: %w", err)
+	}
+
+	// Build the local orderer handle and ask it to stop.
+	if err := s.getOrdererFromConfig(dbNode, org, ordererCfg).Stop(); err != nil {
+		return fmt.Errorf("failed to stop orderer: %w", err)
+	}
+
+	return nil
+}
+
+// UpdateFabricPeer updates a Fabric peer node configuration.
+//
+// Only fields set in opts are applied; addresses are validated individually
+// and then together for port conflicts. Both the node config and the
+// deployment config are persisted, after which the on-disk peer config is
+// resynchronized.
+func (s *NodeService) UpdateFabricPeer(ctx context.Context, opts UpdateFabricPeerOpts) (*NodeResponse, error) {
+	// Get the node from database
+	node, err := s.db.GetNode(ctx, opts.NodeID)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, errors.NewNotFoundError("peer node not found", nil)
+		}
+		return nil, fmt.Errorf("failed to get peer node: %w", err)
+	}
+
+	// Verify node type
+	if types.NodeType(node.NodeType.String) != types.NodeTypeFabricPeer {
+		return nil, fmt.Errorf("node %d is not a Fabric peer", opts.NodeID)
+	}
+
+	// Load current config
+	nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
+	if err != nil {
+		return nil, fmt.Errorf("failed to load peer config: %w", err)
+	}
+
+	peerConfig, ok := nodeConfig.(*types.FabricPeerConfig)
+	if !ok {
+		return nil, fmt.Errorf("invalid peer config type")
+	}
+
+	deployConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
+	if err != nil {
+		return nil, fmt.Errorf("failed to deserialize deployment config: %w", err)
+	}
+	deployPeerConfig, ok := deployConfig.(*types.FabricPeerDeploymentConfig)
+	if !ok {
+		return nil, fmt.Errorf("invalid deployment config type")
+	}
+
+	// Update configuration fields if provided (empty/nil means "unchanged").
+	if opts.ExternalEndpoint != "" && opts.ExternalEndpoint != peerConfig.ExternalEndpoint {
+		peerConfig.ExternalEndpoint = opts.ExternalEndpoint
+	}
+	if opts.ListenAddress != "" && opts.ListenAddress != peerConfig.ListenAddress {
+		if err := s.validateAddress(opts.ListenAddress); err != nil {
+			return nil, fmt.Errorf("invalid listen address: %w", err)
+		}
+		peerConfig.ListenAddress = opts.ListenAddress
+	}
+	if opts.EventsAddress != "" && opts.EventsAddress != peerConfig.EventsAddress {
+		if err := s.validateAddress(opts.EventsAddress); err != nil {
+			return nil, fmt.Errorf("invalid events address: %w", err)
+		}
+		peerConfig.EventsAddress = opts.EventsAddress
+	}
+	if opts.OperationsListenAddress != "" && opts.OperationsListenAddress != peerConfig.OperationsListenAddress {
+		if err := s.validateAddress(opts.OperationsListenAddress); err != nil {
+			return nil, fmt.Errorf("invalid operations listen address: %w", err)
+		}
+		peerConfig.OperationsListenAddress = opts.OperationsListenAddress
+	}
+	if opts.ChaincodeAddress != "" && opts.ChaincodeAddress != peerConfig.ChaincodeAddress {
+		if err := s.validateAddress(opts.ChaincodeAddress); err != nil {
+			return nil, fmt.Errorf("invalid chaincode address: %w", err)
+		}
+		peerConfig.ChaincodeAddress = opts.ChaincodeAddress
+	}
+	if opts.DomainNames != nil {
+		peerConfig.DomainNames = opts.DomainNames
+	}
+	if opts.Env != nil {
+		peerConfig.Env = opts.Env
+	}
+	if opts.AddressOverrides != nil {
+		peerConfig.AddressOverrides = opts.AddressOverrides
+		deployPeerConfig.AddressOverrides = opts.AddressOverrides
+	}
+	if opts.Version != "" {
+		peerConfig.Version = opts.Version
+		deployPeerConfig.Version = opts.Version
+	}
+
+	// Validate all addresses together for port conflicts
+	if err := s.validateFabricPeerAddresses(peerConfig); err != nil {
+		return nil, err
+	}
+
+	configBytes, err := utils.StoreNodeConfig(nodeConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to store node config: %w", err)
+	}
+	node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{
+		ID: opts.NodeID,
+		NodeConfig: sql.NullString{
+			String: string(configBytes),
+			Valid:  true,
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to update node config: %w", err)
+	}
+
+	// Update the deployment config in the database
+	deploymentConfigBytes, err := json.Marshal(deployPeerConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal updated deployment config: %w", err)
+	}
+
+	node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{
+		ID: opts.NodeID,
+		DeploymentConfig: sql.NullString{
+			String: string(deploymentConfigBytes),
+			Valid:  true,
+		},
+	})
+	// Fix: this error was previously ignored, which could return a stale
+	// node and mask a failed deployment-config write.
+	if err != nil {
+		return nil, fmt.Errorf("failed to update deployment config: %w", err)
+	}
+
+	// Synchronize the peer config
+	if err := s.SynchronizePeerConfig(ctx, opts.NodeID); err != nil {
+		return nil, fmt.Errorf("failed to synchronize peer config: %w", err)
+	}
+
+	// Return updated node response
+	_, nodeResponse := s.mapDBNodeToServiceNode(node)
+	return nodeResponse, nil
+}
+
+// UpdateFabricOrderer updates a Fabric orderer node configuration.
+//
+// Only fields set in opts are applied; each address change is validated
+// before being accepted. Both the node config and the deployment config are
+// persisted before the updated node is returned.
+func (s *NodeService) UpdateFabricOrderer(ctx context.Context, opts UpdateFabricOrdererOpts) (*NodeResponse, error) {
+	// Get the node from database
+	node, err := s.db.GetNode(ctx, opts.NodeID)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, errors.NewNotFoundError("orderer node not found", nil)
+		}
+		return nil, fmt.Errorf("failed to get orderer node: %w", err)
+	}
+
+	// Verify node type
+	if types.NodeType(node.NodeType.String) != types.NodeTypeFabricOrderer {
+		return nil, fmt.Errorf("node %d is not a Fabric orderer", opts.NodeID)
+	}
+
+	// Load current config
+	nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
+	if err != nil {
+		return nil, fmt.Errorf("failed to load orderer config: %w", err)
+	}
+
+	ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig)
+	if !ok {
+		return nil, fmt.Errorf("invalid orderer config type")
+	}
+
+	// Load deployment config; when the node has none stored, an empty
+	// deployment config is used as the base.
+	deployOrdererConfig := &types.FabricOrdererDeploymentConfig{}
+	if node.DeploymentConfig.Valid {
+		deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
+		if err != nil {
+			return nil, fmt.Errorf("failed to deserialize deployment config: %w", err)
+		}
+		var ok bool
+		deployOrdererConfig, ok = deploymentConfig.(*types.FabricOrdererDeploymentConfig)
+		if !ok {
+			return nil, fmt.Errorf("invalid orderer deployment config type")
+		}
+	}
+
+	// Update configuration fields if provided (empty/nil means "unchanged").
+	if opts.ExternalEndpoint != "" && opts.ExternalEndpoint != ordererConfig.ExternalEndpoint {
+		ordererConfig.ExternalEndpoint = opts.ExternalEndpoint
+	}
+	if opts.ListenAddress != "" && opts.ListenAddress != ordererConfig.ListenAddress {
+		if err := s.validateAddress(opts.ListenAddress); err != nil {
+			return nil, fmt.Errorf("invalid listen address: %w", err)
+		}
+		ordererConfig.ListenAddress = opts.ListenAddress
+	}
+	if opts.AdminAddress != "" && opts.AdminAddress != ordererConfig.AdminAddress {
+		if err := s.validateAddress(opts.AdminAddress); err != nil {
+			return nil, fmt.Errorf("invalid admin address: %w", err)
+		}
+		ordererConfig.AdminAddress = opts.AdminAddress
+	}
+	if opts.OperationsListenAddress != "" && opts.OperationsListenAddress != ordererConfig.OperationsListenAddress {
+		if err := s.validateAddress(opts.OperationsListenAddress); err != nil {
+			return nil, fmt.Errorf("invalid operations listen address: %w", err)
+		}
+		ordererConfig.OperationsListenAddress = opts.OperationsListenAddress
+	}
+	if opts.DomainNames != nil {
+		ordererConfig.DomainNames = opts.DomainNames
+	}
+	if opts.Env != nil {
+		ordererConfig.Env = opts.Env
+	}
+	if opts.Version != "" {
+		ordererConfig.Version = opts.Version
+		deployOrdererConfig.Version = opts.Version
+	}
+
+	configBytes, err := utils.StoreNodeConfig(nodeConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to store node config: %w", err)
+	}
+	node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{
+		ID: opts.NodeID,
+		NodeConfig: sql.NullString{
+			String: string(configBytes),
+			Valid:  true,
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to update node config: %w", err)
+	}
+
+	// Update the deployment config in the database
+	deploymentConfigBytes, err := json.Marshal(deployOrdererConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal updated deployment config: %w", err)
+	}
+
+	node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{
+		ID: opts.NodeID,
+		DeploymentConfig: sql.NullString{
+			String: string(deploymentConfigBytes),
+			Valid:  true,
+		},
+	})
+	// Fix: this error was previously ignored, which could return a stale
+	// node and mask a failed deployment-config write.
+	if err != nil {
+		return nil, fmt.Errorf("failed to update deployment config: %w", err)
+	}
+
+	// Return updated node response
+	_, nodeResponse := s.mapDBNodeToServiceNode(node)
+	return nodeResponse, nil
+}
+
+// SynchronizePeerConfig synchronizes the peer's configuration files and
+// service with the node's stored configuration.
+//
+// It rebuilds the LocalPeer from the node config and organization, loads
+// the deployment config, and delegates the actual file/service sync to
+// LocalPeer.SynchronizeConfig. Unlike the getter paths, a deployment-config
+// failure here is a hard error, since the sync cannot proceed without it.
+func (s *NodeService) SynchronizePeerConfig(ctx context.Context, nodeID int64) error {
+	// Get the node from database
+	node, err := s.db.GetNode(ctx, nodeID)
+	if err != nil {
+		return fmt.Errorf("failed to get node: %w", err)
+	}
+
+	// Verify node type
+	if types.NodeType(node.NodeType.String) != types.NodeTypeFabricPeer {
+		return fmt.Errorf("node %d is not a Fabric peer", nodeID)
+	}
+
+	// Load node config
+	nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
+	if err != nil {
+		return fmt.Errorf("failed to load node config: %w", err)
+	}
+
+	peerConfig, ok := nodeConfig.(*types.FabricPeerConfig)
+	if !ok {
+		return fmt.Errorf("invalid peer config type")
+	}
+
+	// Get organization
+	org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID)
+	if err != nil {
+		return fmt.Errorf("failed to get organization: %w", err)
+	}
+
+	// Get local peer instance
+	localPeer := s.getPeerFromConfig(node, org, peerConfig)
+
+	// Get deployment config
+	deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
+	if err != nil {
+		return fmt.Errorf("failed to deserialize deployment config: %w", err)
+	}
+
+	peerDeployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig)
+	if !ok {
+		return fmt.Errorf("invalid peer deployment config type")
+	}
+
+	// Synchronize configuration
+	if err := localPeer.SynchronizeConfig(peerDeployConfig); err != nil {
+		return fmt.Errorf("failed to synchronize peer config: %w", err)
+	}
+
+	return nil
+}
+
+// validateFabricPeerAddresses validates all addresses used by a Fabric peer
+func (s *NodeService) validateFabricPeerAddresses(config *types.FabricPeerConfig) error {
+ // Get current addresses to compare against
+ currentAddresses := map[string]string{
+ "listen": config.ListenAddress,
+ "chaincode": config.ChaincodeAddress,
+ "events": config.EventsAddress,
+ "operations": config.OperationsListenAddress,
+ }
+
+ // Check for port conflicts between addresses
+ usedPorts := make(map[string]string)
+ for addrType, addr := range currentAddresses {
+ _, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return fmt.Errorf("invalid %s address format: %w", addrType, err)
+ }
+
+ if existingType, exists := usedPorts[port]; exists {
+ // If the port is already used by the same address type, it's okay
+ if existingType == addrType {
+ continue
+ }
+ return fmt.Errorf("port conflict: %s and %s addresses use the same port %s", existingType, addrType, port)
+ }
+ usedPorts[port] = addrType
+
+ // Only validate port availability if it's not already in use by this peer
+ if err := s.validateAddress(addr); err != nil {
+ // Check if the error is due to the port being in use by this peer
+ if strings.Contains(err.Error(), "address already in use") {
+ continue
+ }
+ return fmt.Errorf("invalid %s address: %w", addrType, err)
+ }
+ }
+
+ return nil
+}
+
+// validateFabricOrdererAddresses validates all addresses used by a Fabric orderer
+func (s *NodeService) validateFabricOrdererAddresses(config *types.FabricOrdererConfig) error {
+ // Validate listen address
+ if err := s.validateAddress(config.ListenAddress); err != nil {
+ return fmt.Errorf("invalid listen address: %w", err)
+ }
+
+ // Validate admin address
+ if err := s.validateAddress(config.AdminAddress); err != nil {
+ return fmt.Errorf("invalid admin address: %w", err)
+ }
+
+ // Validate operations listen address
+ if err := s.validateAddress(config.OperationsListenAddress); err != nil {
+ return fmt.Errorf("invalid operations listen address: %w", err)
+ }
+
+ // Check for port conflicts between addresses
+ addresses := map[string]string{
+ "listen": config.ListenAddress,
+ "admin": config.AdminAddress,
+ "operations": config.OperationsListenAddress,
+ }
+
+ usedPorts := make(map[string]string)
+ for addrType, addr := range addresses {
+ _, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return fmt.Errorf("invalid %s address format: %w", addrType, err)
+ }
+
+ if existingType, exists := usedPorts[port]; exists {
+ return fmt.Errorf("port conflict: %s and %s addresses use the same port %s", existingType, addrType, port)
+ }
+ usedPorts[port] = addrType
+ }
+
+ return nil
+}
+
+// initializeFabricPeer initializes a Fabric peer node
+func (s *NodeService) initializeFabricPeer(ctx context.Context, dbNode *db.Node, req *types.FabricPeerConfig) (types.NodeDeploymentConfig, error) {
+ org, err := s.orgService.GetOrganization(ctx, req.OrganizationID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get organization: %w", err)
+ }
+
+ localPeer := s.getPeerFromConfig(dbNode, org, req)
+
+ // Get deployment config from initialization
+ peerConfig, err := localPeer.Init()
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize peer: %w", err)
+ }
+
+ return peerConfig, nil
+}
+
+// getOrdererFromConfig creates a LocalOrderer instance from configuration
+func (s *NodeService) getOrdererFromConfig(dbNode *db.Node, org *fabricservice.OrganizationDTO, config *types.FabricOrdererConfig) *orderer.LocalOrderer {
+ return orderer.NewLocalOrderer(
+ org.MspID,
+ s.db,
+ orderer.StartOrdererOpts{
+ ID: dbNode.Name,
+ ListenAddress: config.ListenAddress,
+ OperationsListenAddress: config.OperationsListenAddress,
+ AdminListenAddress: config.AdminAddress,
+ ExternalEndpoint: config.ExternalEndpoint,
+ DomainNames: config.DomainNames,
+ Env: config.Env,
+ Version: config.Version,
+ AddressOverrides: config.AddressOverrides,
+ },
+ config.Mode,
+ org,
+ config.OrganizationID,
+ s.orgService,
+ s.keymanagementService,
+ dbNode.ID,
+ s.logger,
+ s.configService,
+ s.settingsService,
+ )
+}
+
+// initializeFabricOrderer initializes a Fabric orderer node
+func (s *NodeService) initializeFabricOrderer(ctx context.Context, dbNode *db.Node, req *types.FabricOrdererConfig) (*types.FabricOrdererDeploymentConfig, error) {
+ org, err := s.orgService.GetOrganization(ctx, req.OrganizationID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get organization: %w", err)
+ }
+
+ localOrderer := s.getOrdererFromConfig(dbNode, org, req)
+
+ // Get deployment config from initialization
+ config, err := localOrderer.Init()
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize orderer: %w", err)
+ }
+
+ // Type assert the config
+ ordererConfig, ok := config.(*types.FabricOrdererDeploymentConfig)
+ if !ok {
+ return nil, fmt.Errorf("invalid orderer config type")
+ }
+
+ return ordererConfig, nil
+}
+
+// getPeerFromConfig creates a peer instance from the given configuration and database node
+func (s *NodeService) getPeerFromConfig(dbNode *db.Node, org *fabricservice.OrganizationDTO, config *types.FabricPeerConfig) *peer.LocalPeer {
+ return peer.NewLocalPeer(
+ org.MspID,
+ s.db,
+ peer.StartPeerOpts{
+ ID: dbNode.Slug,
+ ListenAddress: config.ListenAddress,
+ ChaincodeAddress: config.ChaincodeAddress,
+ EventsAddress: config.EventsAddress,
+ OperationsListenAddress: config.OperationsListenAddress,
+ ExternalEndpoint: config.ExternalEndpoint,
+ DomainNames: config.DomainNames,
+ Env: config.Env,
+ Version: config.Version,
+ AddressOverrides: config.AddressOverrides,
+ },
+ config.Mode,
+ org,
+ org.ID,
+ s.orgService,
+ s.keymanagementService,
+ dbNode.ID,
+ s.logger,
+ s.configService,
+ s.settingsService,
+ )
+}
+
+// renewPeerCertificates handles certificate renewal for a Fabric peer
+func (s *NodeService) renewPeerCertificates(ctx context.Context, dbNode *db.Node, deploymentConfig types.NodeDeploymentConfig) error {
+ // Create certificate renewal starting event
+ if err := s.eventService.CreateEvent(ctx, dbNode.ID, NodeEventRenewingCertificates, map[string]interface{}{
+ "node_id": dbNode.ID,
+ "name": dbNode.Name,
+ "action": "renewing_certificates",
+ "type": "peer",
+ }); err != nil {
+ s.logger.Error("Failed to create certificate renewal starting event", "error", err)
+ }
+
+ nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+ if err != nil {
+ return fmt.Errorf("failed to load node config: %w", err)
+ }
+
+ peerConfig, ok := nodeConfig.(*types.FabricPeerConfig)
+ if !ok {
+ return fmt.Errorf("invalid peer config type")
+ }
+
+ peerDeployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig)
+ if !ok {
+ return fmt.Errorf("invalid peer deployment config type")
+ }
+
+ org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID)
+ if err != nil {
+ return fmt.Errorf("failed to get organization: %w", err)
+ }
+
+ localPeer := s.getPeerFromConfig(dbNode, org, peerConfig)
+ err = localPeer.RenewCertificates(peerDeployConfig)
+ if err != nil {
+ return fmt.Errorf("failed to renew peer certificates: %w", err)
+ }
+
+ // Create certificate renewal completed event
+ if err := s.eventService.CreateEvent(ctx, dbNode.ID, NodeEventRenewedCertificates, map[string]interface{}{
+ "node_id": dbNode.ID,
+ "name": dbNode.Name,
+		"action":  "renewed_certificates",
+ "type": "peer",
+ }); err != nil {
+ s.logger.Error("Failed to create certificate renewal completed event", "error", err)
+ }
+
+ return nil
+}
+
+// renewOrdererCertificates handles certificate renewal for a Fabric orderer
+func (s *NodeService) renewOrdererCertificates(ctx context.Context, dbNode *db.Node, deploymentConfig types.NodeDeploymentConfig) error {
+ // Create certificate renewal starting event
+ if err := s.eventService.CreateEvent(ctx, dbNode.ID, NodeEventRenewingCertificates, map[string]interface{}{
+ "node_id": dbNode.ID,
+ "name": dbNode.Name,
+ "action": "renewing_certificates",
+ "type": "orderer",
+ }); err != nil {
+ s.logger.Error("Failed to create certificate renewal starting event", "error", err)
+ }
+
+ nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
+ if err != nil {
+ return fmt.Errorf("failed to load node config: %w", err)
+ }
+
+ ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig)
+ if !ok {
+ return fmt.Errorf("invalid orderer config type")
+ }
+
+ ordererDeployConfig, ok := deploymentConfig.(*types.FabricOrdererDeploymentConfig)
+ if !ok {
+ return fmt.Errorf("invalid orderer deployment config type")
+ }
+
+ org, err := s.orgService.GetOrganization(ctx, ordererConfig.OrganizationID)
+ if err != nil {
+ return fmt.Errorf("failed to get organization: %w", err)
+ }
+
+ localOrderer := s.getOrdererFromConfig(dbNode, org, ordererConfig)
+ err = localOrderer.RenewCertificates(ordererDeployConfig)
+ if err != nil {
+ return fmt.Errorf("failed to renew orderer certificates: %w", err)
+ }
+
+ // Create certificate renewal completed event
+ if err := s.eventService.CreateEvent(ctx, dbNode.ID, NodeEventRenewedCertificates, map[string]interface{}{
+ "node_id": dbNode.ID,
+ "name": dbNode.Name,
+		"action":  "renewed_certificates",
+ "type": "orderer",
+ }); err != nil {
+ s.logger.Error("Failed to create certificate renewal completed event", "error", err)
+ }
+
+ return nil
+}
+
+// cleanupPeerResources cleans up resources specific to a Fabric peer node
+func (s *NodeService) cleanupPeerResources(ctx context.Context, node *db.Node) error {
+ // Clean up peer-specific directories
+ dirsToClean := []string{
+ filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug),
+ filepath.Join(s.configService.GetDataPath(), "peers", node.Slug),
+ filepath.Join(s.configService.GetDataPath(), "fabric", "peers", node.Slug),
+ }
+
+ for _, dir := range dirsToClean {
+ if err := os.RemoveAll(dir); err != nil {
+ if !os.IsNotExist(err) {
+ s.logger.Warn("Failed to remove peer directory",
+ "path", dir,
+ "error", err)
+ }
+ } else {
+ s.logger.Info("Successfully removed peer directory",
+ "path", dir)
+ }
+ }
+
+ return nil
+}
+
+// cleanupOrdererResources cleans up resources specific to a Fabric orderer node
+func (s *NodeService) cleanupOrdererResources(ctx context.Context, node *db.Node) error {
+
+ // Clean up orderer-specific directories
+ dirsToClean := []string{
+ filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug),
+ filepath.Join(s.configService.GetDataPath(), "orderers", node.Slug),
+ filepath.Join(s.configService.GetDataPath(), "fabric", "orderers", node.Slug),
+ }
+
+ for _, dir := range dirsToClean {
+ if err := os.RemoveAll(dir); err != nil {
+ if !os.IsNotExist(err) {
+ s.logger.Warn("Failed to remove orderer directory",
+ "path", dir,
+ "error", err)
+ }
+ } else {
+ s.logger.Info("Successfully removed orderer directory",
+ "path", dir)
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/nodes/service/ports.go b/pkg/nodes/service/ports.go
index d101b35..a00c0d3 100644
--- a/pkg/nodes/service/ports.go
+++ b/pkg/nodes/service/ports.go
@@ -8,12 +8,18 @@ import (
// isPortAvailable checks if a port is available by attempting to listen on it
func isPortAvailable(port int) bool {
- addr := fmt.Sprintf("0.0.0.0:%d", port)
- ln, err := net.Listen("tcp", addr)
- if err != nil {
- return false
+ addrs := []string{
+ "0.0.0.0",
+ "127.0.0.1",
+ }
+ for _, addr := range addrs {
+ fullAddr := fmt.Sprintf("%s:%d", addr, port)
+ ln, err := net.Listen("tcp", fullAddr)
+ if err != nil {
+ return false
+ }
+ ln.Close()
}
- ln.Close()
return true
}
diff --git a/pkg/nodes/service/service.go b/pkg/nodes/service/service.go
index b0eb0d8..0cfeb1a 100644
--- a/pkg/nodes/service/service.go
+++ b/pkg/nodes/service/service.go
@@ -20,13 +20,11 @@ import (
fabricservice "github.com/chainlaunch/chainlaunch/pkg/fabric/service"
keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service"
"github.com/chainlaunch/chainlaunch/pkg/logger"
- networktypes "github.com/chainlaunch/chainlaunch/pkg/networks/service/types"
- "github.com/chainlaunch/chainlaunch/pkg/nodes/besu"
- "github.com/chainlaunch/chainlaunch/pkg/nodes/orderer"
- "github.com/chainlaunch/chainlaunch/pkg/nodes/peer"
+ metricscommon "github.com/chainlaunch/chainlaunch/pkg/metrics/common"
"github.com/chainlaunch/chainlaunch/pkg/nodes/types"
"github.com/chainlaunch/chainlaunch/pkg/nodes/utils"
settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service"
+ "github.com/hyperledger/fabric-protos-go-apiv2/peer/lifecycle"
)
// NodeService handles business logic for node management
@@ -38,6 +36,7 @@ type NodeService struct {
eventService *NodeEventService
configService *config.ConfigService
settingsService *settingsservice.SettingsService
+ metricsService metricscommon.Service
}
// CreateNodeRequest represents the service-layer request to create a node
@@ -71,6 +70,10 @@ func NewNodeService(
}
}
+func (s *NodeService) SetMetricsService(metricsService metricscommon.Service) {
+ s.metricsService = metricsService
+}
+
func (s *NodeService) validateCreateNodeRequest(req CreateNodeRequest) error {
if req.Name == "" {
return fmt.Errorf("name is required")
@@ -135,86 +138,6 @@ func (s *NodeService) validateAddress(address string) error {
return nil
}
-// validateFabricPeerAddresses validates all addresses used by a Fabric peer
-func (s *NodeService) validateFabricPeerAddresses(config *types.FabricPeerConfig) error {
- // Get current addresses to compare against
- currentAddresses := map[string]string{
- "listen": config.ListenAddress,
- "chaincode": config.ChaincodeAddress,
- "events": config.EventsAddress,
- "operations": config.OperationsListenAddress,
- }
-
- // Check for port conflicts between addresses
- usedPorts := make(map[string]string)
- for addrType, addr := range currentAddresses {
- _, port, err := net.SplitHostPort(addr)
- if err != nil {
- return fmt.Errorf("invalid %s address format: %w", addrType, err)
- }
-
- if existingType, exists := usedPorts[port]; exists {
- // If the port is already used by the same address type, it's okay
- if existingType == addrType {
- continue
- }
- return fmt.Errorf("port conflict: %s and %s addresses use the same port %s", existingType, addrType, port)
- }
- usedPorts[port] = addrType
-
- // Only validate port availability if it's not already in use by this peer
- if err := s.validateAddress(addr); err != nil {
- // Check if the error is due to the port being in use by this peer
- if strings.Contains(err.Error(), "address already in use") {
- continue
- }
- return fmt.Errorf("invalid %s address: %w", addrType, err)
- }
- }
-
- return nil
-}
-
-// validateFabricOrdererAddresses validates all addresses used by a Fabric orderer
-func (s *NodeService) validateFabricOrdererAddresses(config *types.FabricOrdererConfig) error {
- // Validate listen address
- if err := s.validateAddress(config.ListenAddress); err != nil {
- return fmt.Errorf("invalid listen address: %w", err)
- }
-
- // Validate admin address
- if err := s.validateAddress(config.AdminAddress); err != nil {
- return fmt.Errorf("invalid admin address: %w", err)
- }
-
- // Validate operations listen address
- if err := s.validateAddress(config.OperationsListenAddress); err != nil {
- return fmt.Errorf("invalid operations listen address: %w", err)
- }
-
- // Check for port conflicts between addresses
- addresses := map[string]string{
- "listen": config.ListenAddress,
- "admin": config.AdminAddress,
- "operations": config.OperationsListenAddress,
- }
-
- usedPorts := make(map[string]string)
- for addrType, addr := range addresses {
- _, port, err := net.SplitHostPort(addr)
- if err != nil {
- return fmt.Errorf("invalid %s address format: %w", addrType, err)
- }
-
- if existingType, exists := usedPorts[port]; exists {
- return fmt.Errorf("port conflict: %s and %s addresses use the same port %s", existingType, addrType, port)
- }
- usedPorts[port] = addrType
- }
-
- return nil
-}
-
// generateSlug creates a URL-friendly slug from a string
func (s *NodeService) generateSlug(name string) string {
// Convert to lowercase
@@ -232,7 +155,7 @@ func (s *NodeService) generateSlug(name string) string {
reg = regexp.MustCompile("-+")
slug = reg.ReplaceAllString(slug, "-")
- // Trim hyphens from start and end‰
+ // Trim hyphens from start and end
slug = strings.Trim(slug, "-")
return slug
@@ -377,8 +300,12 @@ func (s *NodeService) CreateNode(ctx context.Context, req CreateNodeRequest) (*N
if err != nil {
return nil, fmt.Errorf("failed to get node: %w", err)
}
+ // Map database node to service node
_, nodeResponse := s.mapDBNodeToServiceNode(node)
+	// Reload metrics scrape targets so the new node is picked up (metricsService is optional — set via SetMetricsService)
+	if s.metricsService != nil { s.metricsService.Reload(ctx) }
+
return nodeResponse, nil
}
@@ -390,7 +317,7 @@ func (s *NodeService) createNodeConfig(req CreateNodeRequest) (types.NodeConfig,
return &types.FabricPeerConfig{
BaseNodeConfig: types.BaseNodeConfig{
Type: "fabric-peer",
- Mode: "service",
+ Mode: req.FabricPeer.Mode,
},
Name: req.FabricPeer.Name,
OrganizationID: req.FabricPeer.OrganizationID,
@@ -408,7 +335,7 @@ func (s *NodeService) createNodeConfig(req CreateNodeRequest) (types.NodeConfig,
return &types.FabricOrdererConfig{
BaseNodeConfig: types.BaseNodeConfig{
Type: "fabric-orderer",
- Mode: "service",
+ Mode: req.FabricOrderer.Mode,
},
Name: req.FabricOrderer.Name,
OrganizationID: req.FabricOrderer.OrganizationID,
@@ -474,173 +401,6 @@ func (s *NodeService) initializeNode(ctx context.Context, dbNode *db.Node, req C
return nil, fmt.Errorf("unsupported platform: %s", dbNode.Platform)
}
-// getPeerFromConfig creates a peer instance from the given configuration and database node
-func (s *NodeService) getPeerFromConfig(dbNode *db.Node, org *fabricservice.OrganizationDTO, config *types.FabricPeerConfig) *peer.LocalPeer {
- return peer.NewLocalPeer(
- org.MspID,
- s.db,
- peer.StartPeerOpts{
- ID: dbNode.Slug,
- ListenAddress: config.ListenAddress,
- ChaincodeAddress: config.ChaincodeAddress,
- EventsAddress: config.EventsAddress,
- OperationsListenAddress: config.OperationsListenAddress,
- ExternalEndpoint: config.ExternalEndpoint,
- DomainNames: config.DomainNames,
- Env: config.Env,
- Version: config.Version,
- AddressOverrides: config.AddressOverrides,
- },
- config.Mode,
- org,
- org.ID,
- s.orgService,
- s.keymanagementService,
- dbNode.ID,
- s.logger,
- s.configService,
- s.settingsService,
- )
-}
-
-// initializeFabricPeer initializes a Fabric peer node
-func (s *NodeService) initializeFabricPeer(ctx context.Context, dbNode *db.Node, req *types.FabricPeerConfig) (types.NodeDeploymentConfig, error) {
- org, err := s.orgService.GetOrganization(ctx, req.OrganizationID)
- if err != nil {
- return nil, fmt.Errorf("failed to get organization: %w", err)
- }
-
- localPeer := s.getPeerFromConfig(dbNode, org, req)
-
- // Get deployment config from initialization
- peerConfig, err := localPeer.Init()
- if err != nil {
- return nil, fmt.Errorf("failed to initialize peer: %w", err)
- }
-
- return peerConfig, nil
-}
-
-// getOrdererFromConfig creates a LocalOrderer instance from configuration
-func (s *NodeService) getOrdererFromConfig(dbNode *db.Node, org *fabricservice.OrganizationDTO, config *types.FabricOrdererConfig) *orderer.LocalOrderer {
- return orderer.NewLocalOrderer(
- org.MspID,
- s.db,
- orderer.StartOrdererOpts{
- ID: dbNode.Name,
- ListenAddress: config.ListenAddress,
- OperationsListenAddress: config.OperationsListenAddress,
- AdminListenAddress: config.AdminAddress,
- ExternalEndpoint: config.ExternalEndpoint,
- DomainNames: config.DomainNames,
- Env: config.Env,
- Version: config.Version,
- AddressOverrides: config.AddressOverrides,
- },
- config.Mode,
- org,
- config.OrganizationID,
- s.orgService,
- s.keymanagementService,
- dbNode.ID,
- s.logger,
- s.configService,
- s.settingsService,
- )
-}
-
-// initializeFabricOrderer initializes a Fabric orderer node
-func (s *NodeService) initializeFabricOrderer(ctx context.Context, dbNode *db.Node, req *types.FabricOrdererConfig) (*types.FabricOrdererDeploymentConfig, error) {
- org, err := s.orgService.GetOrganization(ctx, req.OrganizationID)
- if err != nil {
- return nil, fmt.Errorf("failed to get organization: %w", err)
- }
-
- localOrderer := s.getOrdererFromConfig(dbNode, org, req)
-
- // Get deployment config from initialization
- config, err := localOrderer.Init()
- if err != nil {
- return nil, fmt.Errorf("failed to initialize orderer: %w", err)
- }
-
- // Type assert the config
- ordererConfig, ok := config.(*types.FabricOrdererDeploymentConfig)
- if !ok {
- return nil, fmt.Errorf("invalid orderer config type")
- }
-
- return ordererConfig, nil
-}
-
-// initializeBesuNode initializes a Besu node
-func (s *NodeService) initializeBesuNode(ctx context.Context, dbNode *db.Node, config *types.BesuNodeConfig) (types.NodeDeploymentConfig, error) {
- // Validate key exists
- key, err := s.keymanagementService.GetKey(ctx, int(config.KeyID))
- if err != nil {
- return nil, fmt.Errorf("failed to get key: %w", err)
- }
- if key.EthereumAddress == "" {
- return nil, fmt.Errorf("key %d has no ethereum address", config.KeyID)
- }
- enodeURL := fmt.Sprintf("enode://%s@%s:%d", key.PublicKey[2:], config.ExternalIP, config.P2PPort)
-
- // Validate ports
- if err := s.validatePort(config.P2PHost, int(config.P2PPort)); err != nil {
- return nil, fmt.Errorf("invalid P2P port: %w", err)
- }
- if err := s.validatePort(config.RPCHost, int(config.RPCPort)); err != nil {
- return nil, fmt.Errorf("invalid RPC port: %w", err)
- }
-
- // Create deployment config
- deploymentConfig := &types.BesuNodeDeploymentConfig{
- BaseDeploymentConfig: types.BaseDeploymentConfig{
- Type: "besu",
- Mode: string(config.Mode),
- },
- KeyID: config.KeyID,
- P2PPort: config.P2PPort,
- RPCPort: config.RPCPort,
- NetworkID: config.NetworkID,
- ExternalIP: config.ExternalIP,
- P2PHost: config.P2PHost,
- RPCHost: config.RPCHost,
- InternalIP: config.InternalIP,
- EnodeURL: enodeURL,
- }
-
- // Update node endpoint
- endpoint := fmt.Sprintf("%s:%d", config.P2PHost, config.P2PPort)
- _, err = s.db.UpdateNodeEndpoint(ctx, &db.UpdateNodeEndpointParams{
- ID: dbNode.ID,
- Endpoint: sql.NullString{
- String: endpoint,
- Valid: true,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("failed to update node endpoint: %w", err)
- }
-
- // Update node public endpoint if external IP is set
- if config.ExternalIP != "" {
- publicEndpoint := fmt.Sprintf("%s:%d", config.ExternalIP, config.P2PPort)
- _, err = s.db.UpdateNodePublicEndpoint(ctx, &db.UpdateNodePublicEndpointParams{
- ID: dbNode.ID,
- PublicEndpoint: sql.NullString{
- String: publicEndpoint,
- Valid: true,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("failed to update node public endpoint: %w", err)
- }
- }
-
- return deploymentConfig, nil
-}
-
// validatePort checks if a port is valid and available
func (s *NodeService) validatePort(host string, port int) error {
if port < 1 || port > 65535 {
@@ -931,11 +691,20 @@ func (s *NodeService) mapDBNodeToServiceNode(dbNode *db.Node) (*Node, *NodeRespo
KeyID: config.KeyID,
Mode: config.Mode,
BootNodes: config.BootNodes,
+ // Add metrics configuration
+ MetricsEnabled: config.MetricsEnabled,
+ MetricsHost: "0.0.0.0", // Default to allow metrics from any interface
+ MetricsPort: uint(config.MetricsPort),
+ MetricsProtocol: config.MetricsProtocol,
}
deployConfig, ok := deploymentConfig.(*types.BesuNodeDeploymentConfig)
if ok {
nodeResponse.BesuNode.KeyID = deployConfig.KeyID
nodeResponse.BesuNode.EnodeURL = deployConfig.EnodeURL
+ // Add metrics configuration from deployment config
+ nodeResponse.BesuNode.MetricsEnabled = deployConfig.MetricsEnabled
+ nodeResponse.BesuNode.MetricsPort = uint(deployConfig.MetricsPort)
+ nodeResponse.BesuNode.MetricsProtocol = deployConfig.MetricsProtocol
}
}
}
@@ -969,6 +738,14 @@ func (s *NodeService) StopNode(ctx context.Context, id int64) (*NodeResponse, er
return nil, fmt.Errorf("failed to update node status: %w", err)
}
+ // Create stopping event
+ if err := s.eventService.CreateEvent(ctx, id, NodeEventStopping, map[string]interface{}{
+ "node_id": id,
+ "name": node.Name,
+ }); err != nil {
+ s.logger.Error("Failed to create stopping event", "error", err)
+ }
+
var stopErr error
switch types.NodeType(node.NodeType.String) {
case types.NodeTypeFabricPeer:
@@ -987,6 +764,14 @@ func (s *NodeService) StopNode(ctx context.Context, id int64) (*NodeResponse, er
if err := s.updateNodeStatusWithError(ctx, id, types.NodeStatusError, fmt.Sprintf("Failed to stop node: %v", stopErr)); err != nil {
s.logger.Error("Failed to update node status after stop error", "error", err)
}
+ // Create error event
+ if err := s.eventService.CreateEvent(ctx, id, NodeEventError, map[string]interface{}{
+ "node_id": id,
+ "name": node.Name,
+ "error": stopErr.Error(),
+ }); err != nil {
+ s.logger.Error("Failed to create error event", "error", err)
+ }
return nil, fmt.Errorf("failed to stop node: %w", stopErr)
}
@@ -994,8 +779,16 @@ func (s *NodeService) StopNode(ctx context.Context, id int64) (*NodeResponse, er
if err := s.updateNodeStatus(ctx, id, types.NodeStatusStopped); err != nil {
return nil, fmt.Errorf("failed to update node status: %w", err)
}
- _, nodeResponse := s.mapDBNodeToServiceNode(node)
+ // Create stopped event
+ if err := s.eventService.CreateEvent(ctx, id, NodeEventStopped, map[string]interface{}{
+ "node_id": id,
+ "name": node.Name,
+ }); err != nil {
+ s.logger.Error("Failed to create stopped event", "error", err)
+ }
+
+ _, nodeResponse := s.mapDBNodeToServiceNode(node)
return nodeResponse, nil
}
@@ -1006,6 +799,14 @@ func (s *NodeService) startNode(ctx context.Context, dbNode *db.Node) error {
return fmt.Errorf("failed to update node status: %w", err)
}
+ // Create starting event
+ if err := s.eventService.CreateEvent(ctx, dbNode.ID, NodeEventStarting, map[string]interface{}{
+ "node_id": dbNode.ID,
+ "name": dbNode.Name,
+ }); err != nil {
+ s.logger.Error("Failed to create starting event", "error", err)
+ }
+
var startErr error
switch types.NodeType(dbNode.NodeType.String) {
case types.NodeTypeFabricPeer:
@@ -1024,6 +825,14 @@ func (s *NodeService) startNode(ctx context.Context, dbNode *db.Node) error {
if err := s.updateNodeStatusWithError(ctx, dbNode.ID, types.NodeStatusError, fmt.Sprintf("Failed to start node: %v", startErr)); err != nil {
s.logger.Error("Failed to update node status after start error", "error", err)
}
+ // Create error event
+ if err := s.eventService.CreateEvent(ctx, dbNode.ID, NodeEventError, map[string]interface{}{
+ "node_id": dbNode.ID,
+ "name": dbNode.Name,
+ "error": startErr.Error(),
+ }); err != nil {
+ s.logger.Error("Failed to create error event", "error", err)
+ }
return fmt.Errorf("failed to start node: %w", startErr)
}
@@ -1032,302 +841,17 @@ func (s *NodeService) startNode(ctx context.Context, dbNode *db.Node) error {
return fmt.Errorf("failed to update node status: %w", err)
}
- return nil
-}
-
-// startFabricPeer starts a Fabric peer node
-func (s *NodeService) startFabricPeer(ctx context.Context, dbNode *db.Node) error {
-
- nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to deserialize node config: %w", err)
- }
- peerNodeConfig, ok := nodeConfig.(*types.FabricPeerConfig)
- if !ok {
- return fmt.Errorf("failed to assert node config to FabricPeerConfig")
- }
-
- deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
- if err != nil {
- return fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
- s.logger.Info("Starting fabric peer", "deploymentConfig", deploymentConfig)
-
- peerConfig := deploymentConfig.ToFabricPeerConfig()
-
- org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID)
- if err != nil {
- return fmt.Errorf("failed to get organization: %w", err)
- }
-
- localPeer := s.getPeerFromConfig(dbNode, org, peerNodeConfig)
-
- _, err = localPeer.Start()
- if err != nil {
- return fmt.Errorf("failed to start peer: %w", err)
- }
-
- return nil
-}
-
-// stopFabricPeer stops a Fabric peer node
-func (s *NodeService) stopFabricPeer(ctx context.Context, dbNode *db.Node) error {
- deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.NodeConfig.String)
- if err != nil {
- return fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
- nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to deserialize node config: %w", err)
- }
- peerNodeConfig, ok := nodeConfig.(*types.FabricPeerConfig)
- if !ok {
- return fmt.Errorf("failed to assert node config to FabricPeerConfig")
- }
- s.logger.Debug("peerNodeConfig", "peerNodeConfig", peerNodeConfig)
- peerConfig := deploymentConfig.ToFabricPeerConfig()
- s.logger.Debug("peerConfig", "peerConfig", peerConfig)
- org, err := s.orgService.GetOrganization(ctx, peerNodeConfig.OrganizationID)
- if err != nil {
- return fmt.Errorf("failed to get organization: %w", err)
- }
-
- localPeer := s.getPeerFromConfig(dbNode, org, peerNodeConfig)
-
- err = localPeer.Stop()
- if err != nil {
- return fmt.Errorf("failed to stop peer: %w", err)
- }
-
- return nil
-}
-
-// startFabricOrderer starts a Fabric orderer node
-func (s *NodeService) startFabricOrderer(ctx context.Context, dbNode *db.Node) error {
- nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to deserialize node config: %w", err)
- }
- ordererNodeConfig, ok := nodeConfig.(*types.FabricOrdererConfig)
- if !ok {
- return fmt.Errorf("failed to assert node config to FabricOrdererConfig")
- }
-
- org, err := s.orgService.GetOrganization(ctx, ordererNodeConfig.OrganizationID)
- if err != nil {
- return fmt.Errorf("failed to get organization: %w", err)
- }
-
- localOrderer := s.getOrdererFromConfig(dbNode, org, ordererNodeConfig)
-
- _, err = localOrderer.Start()
- if err != nil {
- return fmt.Errorf("failed to start orderer: %w", err)
- }
-
- return nil
-}
-
-// stopFabricOrderer stops a Fabric orderer node
-func (s *NodeService) stopFabricOrderer(ctx context.Context, dbNode *db.Node) error {
- nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to deserialize node config: %w", err)
- }
- ordererNodeConfig, ok := nodeConfig.(*types.FabricOrdererConfig)
- if !ok {
- return fmt.Errorf("failed to assert node config to FabricOrdererConfig")
- }
-
- org, err := s.orgService.GetOrganization(ctx, ordererNodeConfig.OrganizationID)
- if err != nil {
- return fmt.Errorf("failed to get organization: %w", err)
- }
-
- localOrderer := s.getOrdererFromConfig(dbNode, org, ordererNodeConfig)
-
- err = localOrderer.Stop()
- if err != nil {
- return fmt.Errorf("failed to stop orderer: %w", err)
- }
-
- return nil
-}
-
-func (s *NodeService) getBesuFromConfig(ctx context.Context, dbNode *db.Node, config *types.BesuNodeConfig, deployConfig *types.BesuNodeDeploymentConfig) (*besu.LocalBesu, error) {
- network, err := s.db.GetNetwork(ctx, deployConfig.NetworkID)
- if err != nil {
- return nil, fmt.Errorf("failed to get network: %w", err)
- }
- key, err := s.keymanagementService.GetKey(ctx, int(config.KeyID))
- if err != nil {
- return nil, fmt.Errorf("failed to get key: %w", err)
- }
- privateKeyDecrypted, err := s.keymanagementService.GetDecryptedPrivateKey(int(config.KeyID))
- if err != nil {
- return nil, fmt.Errorf("failed to decrypt key: %w", err)
- }
- var networkConfig networktypes.BesuNetworkConfig
- if err := json.Unmarshal([]byte(network.Config.String), &networkConfig); err != nil {
- return nil, fmt.Errorf("failed to unmarshal network config: %w", err)
- }
-
- localBesu := besu.NewLocalBesu(
- besu.StartBesuOpts{
- ID: dbNode.Slug,
- GenesisFile: network.GenesisBlockB64.String,
- NetworkID: deployConfig.NetworkID,
- P2PPort: fmt.Sprintf("%d", deployConfig.P2PPort),
- RPCPort: fmt.Sprintf("%d", deployConfig.RPCPort),
- ListenAddress: deployConfig.P2PHost,
- MinerAddress: key.EthereumAddress,
- ConsensusType: "qbft", // TODO: get consensus type from network
- BootNodes: config.BootNodes,
- Version: "25.4.1", // TODO: get version from network
- NodePrivateKey: strings.TrimPrefix(privateKeyDecrypted, "0x"),
- Env: config.Env,
- P2PHost: config.P2PHost,
- RPCHost: config.RPCHost,
- },
- string(config.Mode),
- dbNode.ID,
- s.logger,
- s.configService,
- s.settingsService,
- networkConfig,
- )
-
- return localBesu, nil
-}
-
-// stopBesuNode stops a Besu node
-func (s *NodeService) stopBesuNode(ctx context.Context, dbNode *db.Node) error {
- // Load node configuration
- nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to deserialize node config: %w", err)
- }
- besuNodeConfig, ok := nodeConfig.(*types.BesuNodeConfig)
- if !ok {
- return fmt.Errorf("failed to assert node config to BesuNodeConfig")
- }
-
- // Load deployment configuration
- deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
- if err != nil {
- return fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
- besuDeployConfig, ok := deploymentConfig.(*types.BesuNodeDeploymentConfig)
- if !ok {
- return fmt.Errorf("failed to assert deployment config to BesuNodeDeploymentConfig")
- }
-
- // Get Besu instance
- localBesu, err := s.getBesuFromConfig(ctx, dbNode, besuNodeConfig, besuDeployConfig)
- if err != nil {
- return fmt.Errorf("failed to get besu instance: %w", err)
- }
-
- // Stop the node
- err = localBesu.Stop()
- if err != nil {
- return fmt.Errorf("failed to stop besu node: %w", err)
- }
-
- return nil
-}
-
-// startBesuNode starts a Besu node
-func (s *NodeService) startBesuNode(ctx context.Context, dbNode *db.Node) error {
- // Load node configuration
- nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to deserialize node config: %w", err)
- }
- besuNodeConfig, ok := nodeConfig.(*types.BesuNodeConfig)
- if !ok {
- return fmt.Errorf("failed to assert node config to BesuNodeConfig")
- }
-
- // Load deployment configuration
- deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
- if err != nil {
- return fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
- besuDeployConfig, ok := deploymentConfig.(*types.BesuNodeDeploymentConfig)
- if !ok {
- return fmt.Errorf("failed to assert deployment config to BesuNodeDeploymentConfig")
- }
-
- // Get key for node
- key, err := s.keymanagementService.GetKey(ctx, int(besuNodeConfig.KeyID))
- if err != nil {
- return fmt.Errorf("failed to get key: %w", err)
- }
- network, err := s.db.GetNetwork(ctx, besuDeployConfig.NetworkID)
- if err != nil {
- return fmt.Errorf("failed to get network: %w", err)
- }
- privateKeyDecrypted, err := s.keymanagementService.GetDecryptedPrivateKey(int(besuNodeConfig.KeyID))
- if err != nil {
- return fmt.Errorf("failed to decrypt key: %w", err)
- }
- var networkConfig networktypes.BesuNetworkConfig
- if err := json.Unmarshal([]byte(network.Config.String), &networkConfig); err != nil {
- return fmt.Errorf("failed to unmarshal network config: %w", err)
- }
-
- // Create LocalBesu instance
- localBesu := besu.NewLocalBesu(
- besu.StartBesuOpts{
- ID: dbNode.Slug,
- GenesisFile: network.GenesisBlockB64.String,
- NetworkID: besuDeployConfig.NetworkID,
- ChainID: networkConfig.ChainID,
- P2PPort: fmt.Sprintf("%d", besuDeployConfig.P2PPort),
- RPCPort: fmt.Sprintf("%d", besuDeployConfig.RPCPort),
- ListenAddress: besuDeployConfig.P2PHost,
- MinerAddress: key.EthereumAddress,
- ConsensusType: "qbft", // TODO: get consensus type from network
- BootNodes: besuNodeConfig.BootNodes,
- Version: "25.4.1", // TODO: get version from network
- NodePrivateKey: strings.TrimPrefix(privateKeyDecrypted, "0x"),
- Env: besuNodeConfig.Env,
- P2PHost: besuNodeConfig.P2PHost,
- RPCHost: besuNodeConfig.RPCHost,
- },
- string(besuNodeConfig.Mode),
- dbNode.ID,
- s.logger,
- s.configService,
- s.settingsService,
- networkConfig,
- )
-
- // Start the node
- _, err = localBesu.Start()
- if err != nil {
- return fmt.Errorf("failed to start besu node: %w", err)
+ // Create started event
+ if err := s.eventService.CreateEvent(ctx, dbNode.ID, NodeEventStarted, map[string]interface{}{
+ "node_id": dbNode.ID,
+ "name": dbNode.Name,
+ }); err != nil {
+ s.logger.Error("Failed to create started event", "error", err)
}
- s.logger.Info("Started Besu node",
- "nodeID", dbNode.ID,
- "name", dbNode.Name,
- "networkID", besuDeployConfig.NetworkID,
- )
-
return nil
}
-// Helper function to format arguments for launchd plist
-func (s *NodeService) formatPlistArgs(args []string) string {
- var plistArgs strings.Builder
- for _, arg := range args {
- plistArgs.WriteString(fmt.Sprintf(" %s\n", arg))
- }
- return plistArgs.String()
-}
-
// DeleteNode deletes a node by ID
func (s *NodeService) DeleteNode(ctx context.Context, id int64) error {
// Get the node first to check its type and deployment config
@@ -1364,401 +888,75 @@ func (s *NodeService) DeleteNode(ctx context.Context, id int64) error {
return fmt.Errorf("failed to delete node from database: %w", err)
}
+	// Reload metrics so the deleted node stops being tracked
+ s.metricsService.Reload(ctx)
+
return nil
}
-// cleanupPeerResources cleans up resources specific to a Fabric peer node
-func (s *NodeService) cleanupPeerResources(ctx context.Context, node *db.Node) error {
- // Clean up peer-specific directories
- dirsToClean := []string{
- filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug),
- filepath.Join(s.configService.GetDataPath(), "peers", node.Slug),
- filepath.Join(s.configService.GetDataPath(), "fabric", "peers", node.Slug),
+// cleanupNodeResources removes the platform service file (systemd/launchd) and then dispatches node-type-specific cleanup
+func (s *NodeService) cleanupNodeResources(ctx context.Context, node *db.Node) error {
+ // Get the home directory
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return fmt.Errorf("failed to get home directory: %w", err)
+ }
+
+ deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
+ if err != nil {
+ return fmt.Errorf("failed to deserialize deployment config: %w", err)
}
- for _, dir := range dirsToClean {
- if err := os.RemoveAll(dir); err != nil {
+ // Clean up service files based on platform
+ switch runtime.GOOS {
+ case "linux":
+ // Remove systemd service file
+ serviceFile := fmt.Sprintf("/etc/systemd/system/%s.service", deploymentConfig.GetServiceName())
+ if err := os.Remove(serviceFile); err != nil {
if !os.IsNotExist(err) {
- s.logger.Warn("Failed to remove peer directory",
- "path", dir,
- "error", err)
+ s.logger.Warn("Failed to remove systemd service file", "error", err)
}
- } else {
- s.logger.Info("Successfully removed peer directory",
- "path", dir)
}
- }
-
- return nil
-}
-
-// cleanupOrdererResources cleans up resources specific to a Fabric orderer node
-func (s *NodeService) cleanupOrdererResources(ctx context.Context, node *db.Node) error {
- // Clean up orderer-specific directories
- dirsToClean := []string{
- filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug),
- filepath.Join(s.configService.GetDataPath(), "orderers", node.Slug),
- filepath.Join(s.configService.GetDataPath(), "fabric", "orderers", node.Slug),
- }
-
- for _, dir := range dirsToClean {
- if err := os.RemoveAll(dir); err != nil {
+ case "darwin":
+ // Remove launchd plist file
+ plistFile := filepath.Join(homeDir, "Library/LaunchAgents", fmt.Sprintf("dev.chainlaunch.%s.plist", deploymentConfig.GetServiceName()))
+ if err := os.Remove(plistFile); err != nil {
if !os.IsNotExist(err) {
- s.logger.Warn("Failed to remove orderer directory",
- "path", dir,
- "error", err)
+ s.logger.Warn("Failed to remove launchd plist file", "error", err)
}
- } else {
- s.logger.Info("Successfully removed orderer directory",
- "path", dir)
}
}
+ // Clean up node-specific resources based on type
+ switch types.NodeType(node.NodeType.String) {
+ case types.NodeTypeFabricPeer:
+ if err := s.cleanupPeerResources(ctx, node); err != nil {
+ s.logger.Warn("Failed to cleanup peer resources", "error", err)
+ }
+ case types.NodeTypeFabricOrderer:
+ if err := s.cleanupOrdererResources(ctx, node); err != nil {
+ s.logger.Warn("Failed to cleanup orderer resources", "error", err)
+ }
+ case types.NodeTypeBesuFullnode:
+ if err := s.cleanupBesuResources(ctx, node); err != nil {
+ s.logger.Warn("Failed to cleanup besu resources", "error", err)
+ }
+ default:
+ s.logger.Warn("Unknown node type for cleanup", "type", node.NodeType.String)
+ }
+
return nil
}
-// cleanupBesuResources cleans up resources specific to a Besu node
-func (s *NodeService) cleanupBesuResources(ctx context.Context, node *db.Node) error {
-
- // Load node configuration
- nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
+func (s *NodeService) GetNodeLogPath(ctx context.Context, node *NodeResponse) (string, error) {
+ dbNode, err := s.db.GetNode(ctx, node.ID)
if err != nil {
- s.logger.Warn("Failed to load node config during cleanup", "error", err)
- // Continue with cleanup even if config loading fails
+ return "", fmt.Errorf("failed to get node: %w", err)
}
- // Load deployment configuration
- deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
- if err != nil {
- s.logger.Warn("Failed to load deployment config during cleanup", "error", err)
- // Continue with cleanup even if config loading fails
- }
-
- // Create Besu instance for cleanup
- var localBesu *besu.LocalBesu
- if nodeConfig != nil && deploymentConfig != nil {
- besuNodeConfig, ok := nodeConfig.(*types.BesuNodeConfig)
- if !ok {
- s.logger.Warn("Invalid node config type during cleanup")
- }
- besuDeployConfig, ok := deploymentConfig.(*types.BesuNodeDeploymentConfig)
- if !ok {
- s.logger.Warn("Invalid deployment config type during cleanup")
- }
- if besuNodeConfig != nil && besuDeployConfig != nil {
- localBesu, err = s.getBesuFromConfig(ctx, node, besuNodeConfig, besuDeployConfig)
- if err != nil {
- s.logger.Warn("Failed to create Besu instance during cleanup", "error", err)
- }
- }
- }
-
- // Stop the service if it's running and we have a valid Besu instance
- if localBesu != nil {
- if err := localBesu.Stop(); err != nil {
- s.logger.Warn("Failed to stop Besu service during cleanup", "error", err)
- // Continue with cleanup even if stop fails
- }
- }
-
- // Clean up Besu-specific directories
- dirsToClean := []string{
- filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug),
- filepath.Join(s.configService.GetDataPath(), "besu", node.Slug),
- filepath.Join(s.configService.GetDataPath(), "besu", "nodes", node.Slug),
- }
-
- for _, dir := range dirsToClean {
- if err := os.RemoveAll(dir); err != nil {
- if !os.IsNotExist(err) {
- s.logger.Warn("Failed to remove Besu directory",
- "path", dir,
- "error", err)
- }
- } else {
- s.logger.Info("Successfully removed Besu directory",
- "path", dir)
- }
- }
-
- // Clean up service files based on platform
- switch runtime.GOOS {
- case "linux":
- // Remove systemd service file
- if localBesu != nil {
- serviceFile := fmt.Sprintf("/etc/systemd/system/besu-%s.service", node.Slug)
- if err := os.Remove(serviceFile); err != nil {
- if !os.IsNotExist(err) {
- s.logger.Warn("Failed to remove systemd service file", "error", err)
- }
- }
- }
-
- case "darwin":
- // Remove launchd plist file
- homeDir, err := os.UserHomeDir()
- if err != nil {
- return fmt.Errorf("failed to get home directory: %w", err)
- }
- if localBesu != nil {
- plistFile := filepath.Join(homeDir, "Library/LaunchAgents", fmt.Sprintf("dev.chainlaunch.besu.%s.plist", node.Slug))
- if err := os.Remove(plistFile); err != nil {
- if !os.IsNotExist(err) {
- s.logger.Warn("Failed to remove launchd plist file", "error", err)
- }
- }
- }
- }
-
- // Clean up any data directories
- dataDir := filepath.Join(s.configService.GetDataPath(), "data", "besu", node.Slug)
- if err := os.RemoveAll(dataDir); err != nil {
- if !os.IsNotExist(err) {
- s.logger.Warn("Failed to remove Besu data directory",
- "path", dataDir,
- "error", err)
- }
- } else {
- s.logger.Info("Successfully removed Besu data directory",
- "path", dataDir)
- }
-
- return nil
-}
-
-// Update cleanupNodeResources to use the new function
-func (s *NodeService) cleanupNodeResources(ctx context.Context, node *db.Node) error {
- // Get the home directory
- homeDir, err := os.UserHomeDir()
- if err != nil {
- return fmt.Errorf("failed to get home directory: %w", err)
- }
-
- deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
- if err != nil {
- return fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
-
- // Clean up service files based on platform
- switch runtime.GOOS {
- case "linux":
- // Remove systemd service file
- serviceFile := fmt.Sprintf("/etc/systemd/system/%s.service", deploymentConfig.GetServiceName())
- if err := os.Remove(serviceFile); err != nil {
- if !os.IsNotExist(err) {
- s.logger.Warn("Failed to remove systemd service file", "error", err)
- }
- }
-
- case "darwin":
- // Remove launchd plist file
- plistFile := filepath.Join(homeDir, "Library/LaunchAgents", fmt.Sprintf("dev.chainlaunch.%s.plist", deploymentConfig.GetServiceName()))
- if err := os.Remove(plistFile); err != nil {
- if !os.IsNotExist(err) {
- s.logger.Warn("Failed to remove launchd plist file", "error", err)
- }
- }
- }
-
- // Clean up node-specific resources based on type
- switch types.NodeType(node.NodeType.String) {
- case types.NodeTypeFabricPeer:
- if err := s.cleanupPeerResources(ctx, node); err != nil {
- s.logger.Warn("Failed to cleanup peer resources", "error", err)
- }
- case types.NodeTypeFabricOrderer:
- if err := s.cleanupOrdererResources(ctx, node); err != nil {
- s.logger.Warn("Failed to cleanup orderer resources", "error", err)
- }
- case types.NodeTypeBesuFullnode:
- if err := s.cleanupBesuResources(ctx, node); err != nil {
- s.logger.Warn("Failed to cleanup besu resources", "error", err)
- }
- default:
- s.logger.Warn("Unknown node type for cleanup", "type", node.NodeType.String)
- }
-
- return nil
-}
-
-// GetFabricPeerDefaults returns default values for a Fabric peer node
-func (s *NodeService) GetFabricPeerDefaults() *NodeDefaults {
- // Get available ports for peer services
- listen, chaincode, events, operations, err := GetPeerPorts(7051)
- if err != nil {
- // If we can't get the preferred ports, try from a higher range
- listen, chaincode, events, operations, err = GetPeerPorts(10000)
- if err != nil {
- s.logger.Error("Failed to get available ports for peer", "error", err)
- // Fall back to default ports if all attempts fail
- return &NodeDefaults{
- ListenAddress: "0.0.0.0:7051",
- ExternalEndpoint: "localhost:7051",
- ChaincodeAddress: "0.0.0.0:7052",
- EventsAddress: "0.0.0.0:7053",
- OperationsListenAddress: "0.0.0.0:9443",
- Mode: ModeService,
- ServiceName: "fabric-peer",
- LogPath: "/var/log/fabric/peer.log",
- ErrorLogPath: "/var/log/fabric/peer.err",
- }
- }
- }
-
- return &NodeDefaults{
- ListenAddress: fmt.Sprintf("0.0.0.0:%d", listen),
- ExternalEndpoint: fmt.Sprintf("localhost:%d", listen),
- ChaincodeAddress: fmt.Sprintf("0.0.0.0:%d", chaincode),
- EventsAddress: fmt.Sprintf("0.0.0.0:%d", events),
- OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operations),
- Mode: ModeService,
- ServiceName: "fabric-peer",
- LogPath: "/var/log/fabric/peer.log",
- ErrorLogPath: "/var/log/fabric/peer.err",
- }
-}
-
-// GetFabricOrdererDefaults returns default values for a Fabric orderer node
-func (s *NodeService) GetFabricOrdererDefaults() *NodeDefaults {
- // Get available ports for orderer services
- listen, admin, operations, err := GetOrdererPorts(7050)
- if err != nil {
- // If we can't get the preferred ports, try from a higher range
- listen, admin, operations, err = GetOrdererPorts(10000)
- if err != nil {
- s.logger.Error("Failed to get available ports for orderer", "error", err)
- // Fall back to default ports if all attempts fail
- return &NodeDefaults{
- ListenAddress: "0.0.0.0:7050",
- ExternalEndpoint: "localhost:7050",
- AdminAddress: "0.0.0.0:7053",
- OperationsListenAddress: "0.0.0.0:8443",
- Mode: ModeService,
- ServiceName: "fabric-orderer",
- LogPath: "/var/log/fabric/orderer.log",
- ErrorLogPath: "/var/log/fabric/orderer.err",
- }
- }
- }
-
- return &NodeDefaults{
- ListenAddress: fmt.Sprintf("0.0.0.0:%d", listen),
- ExternalEndpoint: fmt.Sprintf("localhost:%d", listen),
- AdminAddress: fmt.Sprintf("0.0.0.0:%d", admin),
- OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operations),
- Mode: ModeService,
- ServiceName: "fabric-orderer",
- LogPath: "/var/log/fabric/orderer.log",
- ErrorLogPath: "/var/log/fabric/orderer.err",
- }
-}
-
-// Update the port offsets and base ports to prevent overlap
-const (
- // Base ports for peers and orderers with sufficient spacing
- peerBasePort = 7000 // Starting port for peers
- ordererBasePort = 9000 // Starting port for orderers with 2000 port gap
-
- // Port offsets to ensure no overlap within node types
- peerPortOffset = 100 // Each peer gets a 100 port range
- ordererPortOffset = 100 // Each orderer gets a 100 port range
-
- maxPortAttempts = 100 // Maximum attempts to find available ports
-)
-
-// GetFabricNodesDefaults returns default values for multiple nodes with guaranteed non-overlapping ports
-func (s *NodeService) GetFabricNodesDefaults(params NodesDefaultsParams) (*NodesDefaultsResult, error) {
- // Validate node counts
- if params.PeerCount > 15 {
- return nil, fmt.Errorf("peer count exceeds maximum supported nodes (15)")
- }
- if params.OrdererCount > 15 {
- return nil, fmt.Errorf("orderer count exceeds maximum supported nodes (15)")
- }
-
- result := &NodesDefaultsResult{
- Peers: make([]NodeDefaults, params.PeerCount),
- Orderers: make([]NodeDefaults, params.OrdererCount),
- AvailableAddresses: []string{"localhost", "0.0.0.0"},
- }
-
- // Generate peer defaults with incremental ports
- // Each peer needs 4 ports (listen, chaincode, events, operations)
- for i := 0; i < params.PeerCount; i++ {
- basePort := peerBasePort + (i * peerPortOffset)
- listen, chaincode, events, operations, err := GetPeerPorts(basePort)
- if err != nil {
- // Try with a higher range if initial attempt fails
- listen, chaincode, events, operations, err = GetPeerPorts(10000 + (i * peerPortOffset))
- if err != nil {
- return nil, fmt.Errorf("failed to get peer ports: %w", err)
- }
- }
-
- // Validate that ports don't overlap with orderer range
- if listen >= ordererBasePort || chaincode >= ordererBasePort ||
- events >= ordererBasePort || operations >= ordererBasePort {
- return nil, fmt.Errorf("peer ports would overlap with orderer port range")
- }
-
- result.Peers[i] = NodeDefaults{
- ListenAddress: fmt.Sprintf("0.0.0.0:%d", listen),
- ExternalEndpoint: fmt.Sprintf("localhost:%d", listen),
- ChaincodeAddress: fmt.Sprintf("0.0.0.0:%d", chaincode),
- EventsAddress: fmt.Sprintf("0.0.0.0:%d", events),
- OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operations),
- Mode: params.Mode,
- ServiceName: fmt.Sprintf("fabric-peer-%d", i+1),
- LogPath: fmt.Sprintf("/var/log/fabric/peer%d.log", i+1),
- ErrorLogPath: fmt.Sprintf("/var/log/fabric/peer%d.err", i+1),
- }
- }
-
- // Generate orderer defaults with incremental ports
- // Each orderer needs 3 ports (listen, admin, operations)
- for i := 0; i < params.OrdererCount; i++ {
- basePort := ordererBasePort + (i * ordererPortOffset)
- listen, admin, operations, err := GetOrdererPorts(basePort)
- if err != nil {
- // Try with a higher range if initial attempt fails
- listen, admin, operations, err = GetOrdererPorts(11000 + (i * ordererPortOffset))
- if err != nil {
- return nil, fmt.Errorf("failed to get orderer ports: %w", err)
- }
- }
-
- // Validate that ports don't overlap with peer range
- maxPeerPort := peerBasePort + (15 * peerPortOffset) // Account for maximum possible peers
- if listen <= maxPeerPort ||
- admin <= maxPeerPort ||
- operations <= maxPeerPort {
- return nil, fmt.Errorf("orderer ports would overlap with peer port range")
- }
-
- result.Orderers[i] = NodeDefaults{
- ListenAddress: fmt.Sprintf("0.0.0.0:%d", listen),
- ExternalEndpoint: fmt.Sprintf("localhost:%d", listen),
- AdminAddress: fmt.Sprintf("0.0.0.0:%d", admin),
- OperationsListenAddress: fmt.Sprintf("0.0.0.0:%d", operations),
- Mode: params.Mode,
- ServiceName: fmt.Sprintf("fabric-orderer-%d", i+1),
- LogPath: fmt.Sprintf("/var/log/fabric/orderer%d.log", i+1),
- ErrorLogPath: fmt.Sprintf("/var/log/fabric/orderer%d.err", i+1),
- }
- }
-
- return result, nil
-}
-
-func (s *NodeService) GetNodeLogPath(ctx context.Context, node *NodeResponse) (string, error) {
- dbNode, err := s.db.GetNode(ctx, node.ID)
- if err != nil {
- return "", fmt.Errorf("failed to get node: %w", err)
- }
-
- // Get deployment config
- deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
+ // Get deployment config
+ deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String)
if err != nil {
return "", fmt.Errorf("failed to deserialize deployment config: %w", err)
}
@@ -1918,227 +1116,6 @@ func (s *NodeService) GetEventsByType(ctx context.Context, nodeID int64, eventTy
return s.eventService.GetEventsByType(ctx, nodeID, eventType, page, limit)
}
-// GetFabricPeer gets a Fabric peer node configuration
-func (s *NodeService) GetFabricPeer(ctx context.Context, id int64) (*peer.LocalPeer, error) {
- // Get the node from database
- node, err := s.db.GetNode(ctx, id)
- if err != nil {
- if err == sql.ErrNoRows {
- return nil, fmt.Errorf("peer node not found: %w", err)
- }
- return nil, fmt.Errorf("failed to get peer node: %w", err)
- }
-
- // Verify node type
- if types.NodeType(node.NodeType.String) != types.NodeTypeFabricPeer {
- return nil, fmt.Errorf("node %d is not a Fabric peer", id)
- }
-
- // Load node config
- nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
- if err != nil {
- return nil, fmt.Errorf("failed to load peer config: %w", err)
- }
-
- // Type assert to FabricPeerConfig
- peerConfig, ok := nodeConfig.(*types.FabricPeerConfig)
- if !ok {
- return nil, fmt.Errorf("invalid peer config type")
- }
-
- // Get deployment config if available
- if node.DeploymentConfig.Valid {
- deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
- if err != nil {
- s.logger.Warn("Failed to deserialize deployment config", "error", err)
- } else {
- // Update config with deployment values
- if deployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig); ok {
- peerConfig.ExternalEndpoint = deployConfig.ExternalEndpoint
- // Add any other deployment-specific fields that should be included
- }
- }
- }
-
- // Get organization
- org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID)
- if err != nil {
- return nil, fmt.Errorf("failed to get organization: %w", err)
- }
-
- // Create and return local peer
- localPeer := s.getPeerFromConfig(node, org, peerConfig)
- return localPeer, nil
-}
-
-// GetFabricOrderer gets a Fabric orderer node configuration
-func (s *NodeService) GetFabricOrderer(ctx context.Context, id int64) (*orderer.LocalOrderer, error) {
- // Get the node from database
- node, err := s.db.GetNode(ctx, id)
- if err != nil {
- if err == sql.ErrNoRows {
- return nil, fmt.Errorf("orderer node not found: %w", err)
- }
- return nil, fmt.Errorf("failed to get orderer node: %w", err)
- }
-
- // Verify node type
- if types.NodeType(node.NodeType.String) != types.NodeTypeFabricOrderer {
- return nil, fmt.Errorf("node %d is not a Fabric orderer", id)
- }
-
- // Load node config
- nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
- if err != nil {
- return nil, fmt.Errorf("failed to load orderer config: %w", err)
- }
-
- // Type assert to FabricOrdererConfig
- ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig)
- if !ok {
- return nil, fmt.Errorf("invalid orderer config type")
- }
-
- // Get deployment config if available
- if node.DeploymentConfig.Valid {
- deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
- if err != nil {
- s.logger.Warn("Failed to deserialize deployment config", "error", err)
- } else {
- // Update config with deployment values
- if deployConfig, ok := deploymentConfig.(*types.FabricOrdererDeploymentConfig); ok {
- ordererConfig.ExternalEndpoint = deployConfig.ExternalEndpoint
- // Add any other deployment-specific fields that should be included
- }
- }
- }
-
- // Get organization
- org, err := s.orgService.GetOrganization(ctx, ordererConfig.OrganizationID)
- if err != nil {
- return nil, fmt.Errorf("failed to get organization: %w", err)
- }
-
- // Create and return local orderer
- localOrderer := s.getOrdererFromConfig(node, org, ordererConfig)
- return localOrderer, nil
-}
-
-// GetFabricNodesByOrganization gets all Fabric nodes (peers and orderers) for an organization
-func (s *NodeService) GetFabricNodesByOrganization(ctx context.Context, orgID int64) ([]NodeResponse, error) {
- // Get all nodes
- nodes, err := s.GetAllNodes(ctx)
- if err != nil {
- return nil, fmt.Errorf("failed to get nodes: %w", err)
- }
-
- // Filter nodes by organization
- var orgNodes []NodeResponse
- for _, node := range nodes.Items {
- // Check node type and config
- switch node.NodeType {
- case types.NodeTypeFabricPeer:
- if node.FabricPeer != nil {
- if node.FabricPeer.OrganizationID == orgID {
- orgNodes = append(orgNodes, node)
- }
- }
- case types.NodeTypeFabricOrderer:
- if node.FabricOrderer != nil {
- if node.FabricOrderer.OrganizationID == orgID {
- orgNodes = append(orgNodes, node)
- }
- }
- }
- }
-
- return orgNodes, nil
-}
-
-// GetBesuPorts attempts to find available ports for P2P and RPC, starting from default ports
-func GetBesuPorts(baseP2PPort, baseRPCPort uint) (p2pPort uint, rpcPort uint, err error) {
- maxAttempts := 100
- // Try to find available ports for P2P and RPC
- p2pPorts, err := findConsecutivePorts(int(baseP2PPort), 1, int(baseP2PPort)+maxAttempts)
- if err != nil {
- return 0, 0, fmt.Errorf("could not find available P2P port: %w", err)
- }
- p2pPort = uint(p2pPorts[0])
-
- rpcPorts, err := findConsecutivePorts(int(baseRPCPort), 1, int(baseRPCPort)+maxAttempts)
- if err != nil {
- return 0, 0, fmt.Errorf("could not find available RPC port: %w", err)
- }
- rpcPort = uint(rpcPorts[0])
-
- return p2pPort, rpcPort, nil
-}
-
-// GetBesuNodeDefaults returns the default configuration for Besu nodes
-func (s *NodeService) GetBesuNodeDefaults(besuNodes int) ([]BesuNodeDefaults, error) {
- // Validate node count
- if besuNodes <= 0 {
- besuNodes = 1
- }
- if besuNodes > 15 {
- return nil, fmt.Errorf("besu node count exceeds maximum supported nodes (15)")
- }
-
- // Get external IP for p2p communication
- externalIP, err := s.GetExternalIP()
- if err != nil {
- return nil, fmt.Errorf("failed to get external IP: %w", err)
- }
-
- // Use localhost for internal IP
- internalIP := "127.0.0.1"
-
- // Base ports for Besu nodes with sufficient spacing
- const (
- baseP2PPort = 30303 // Starting P2P port
- baseRPCPort = 8545 // Starting RPC port
- portOffset = 100 // Each node gets a 100 port range
- )
-
- // Create array to hold all node defaults
- nodeDefaults := make([]BesuNodeDefaults, besuNodes)
-
- // Generate defaults for each node
- for i := 0; i < besuNodes; i++ {
- // Try to get ports for each node
- p2pPort, rpcPort, err := GetBesuPorts(
- uint(baseP2PPort+(i*portOffset)),
- uint(baseRPCPort+(i*portOffset)),
- )
- if err != nil {
- // If we can't get the preferred ports, try from a higher range
- p2pPort, rpcPort, err = GetBesuPorts(
- uint(40303+(i*portOffset)),
- uint(18545+(i*portOffset)),
- )
- if err != nil {
- return nil, fmt.Errorf("failed to find available ports for node %d: %w", i+1, err)
- }
- }
-
- // Create node defaults with unique ports
- nodeDefaults[i] = BesuNodeDefaults{
- P2PHost: externalIP, // Use external IP for p2p host
- P2PPort: p2pPort,
- RPCHost: "0.0.0.0", // Allow RPC from any interface
- RPCPort: rpcPort,
- ExternalIP: externalIP,
- InternalIP: internalIP,
- Mode: ModeService,
- Env: map[string]string{
- "JAVA_OPTS": "-Xmx4g",
- },
- }
- }
-
- return nodeDefaults, nil
-}
-
// Add a method to get full node details when needed
func (s *NodeService) GetNodeWithConfig(ctx context.Context, id int64) (*Node, error) {
dbNode, err := s.db.GetNode(ctx, id)
@@ -2162,6 +1139,18 @@ type Channel struct {
CreatedAt time.Time `json:"createdAt"`
}
+func (s *NodeService) GetFabricChaincodes(ctx context.Context, id int64, channelID string) ([]*lifecycle.QueryChaincodeDefinitionsResult_ChaincodeDefinition, error) {
+ peer, err := s.GetFabricPeer(ctx, id)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get peer: %w", err)
+ }
+ committedChaincodes, err := peer.GetCommittedChaincodes(ctx, channelID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get committed chaincodes: %w", err)
+ }
+ return committedChaincodes, nil
+}
+
// GetNodeChannels retrieves the list of channels for a Fabric node
func (s *NodeService) GetNodeChannels(ctx context.Context, id int64) ([]Channel, error) {
// Get the node first
@@ -2261,6 +1250,15 @@ func (s *NodeService) RenewCertificates(ctx context.Context, id int64) (*NodeRes
if err := s.updateNodeStatusWithError(ctx, id, types.NodeStatusError, fmt.Sprintf("Failed to renew certificates: %v", renewErr)); err != nil {
s.logger.Error("Failed to update node status after renewal error", "error", err)
}
+ // Create error event
+ if err := s.eventService.CreateEvent(ctx, id, NodeEventError, map[string]interface{}{
+ "node_id": id,
+ "name": node.Name,
+ "action": "certificate_renewal",
+ "error": renewErr.Error(),
+ }); err != nil {
+ s.logger.Error("Failed to create error event", "error", err)
+ }
return nil, fmt.Errorf("failed to renew certificates: %w", renewErr)
}
@@ -2278,68 +1276,6 @@ func (s *NodeService) RenewCertificates(ctx context.Context, id int64) (*NodeRes
return updatedNode, nil
}
-// renewPeerCertificates handles certificate renewal for a Fabric peer
-func (s *NodeService) renewPeerCertificates(ctx context.Context, dbNode *db.Node, deploymentConfig types.NodeDeploymentConfig) error {
- nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to load node config: %w", err)
- }
-
- peerConfig, ok := nodeConfig.(*types.FabricPeerConfig)
- if !ok {
- return fmt.Errorf("invalid peer config type")
- }
-
- peerDeployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig)
- if !ok {
- return fmt.Errorf("invalid peer deployment config type")
- }
-
- org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID)
- if err != nil {
- return fmt.Errorf("failed to get organization: %w", err)
- }
-
- localPeer := s.getPeerFromConfig(dbNode, org, peerConfig)
- err = localPeer.RenewCertificates(peerDeployConfig)
- if err != nil {
- return fmt.Errorf("failed to renew peer certificates: %w", err)
- }
-
- return nil
-}
-
-// renewOrdererCertificates handles certificate renewal for a Fabric orderer
-func (s *NodeService) renewOrdererCertificates(ctx context.Context, dbNode *db.Node, deploymentConfig types.NodeDeploymentConfig) error {
- nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to load node config: %w", err)
- }
-
- ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig)
- if !ok {
- return fmt.Errorf("invalid orderer config type")
- }
-
- ordererDeployConfig, ok := deploymentConfig.(*types.FabricOrdererDeploymentConfig)
- if !ok {
- return fmt.Errorf("invalid orderer deployment config type")
- }
-
- org, err := s.orgService.GetOrganization(ctx, ordererConfig.OrganizationID)
- if err != nil {
- return fmt.Errorf("failed to get organization: %w", err)
- }
-
- localOrderer := s.getOrdererFromConfig(dbNode, org, ordererConfig)
- err = localOrderer.RenewCertificates(ordererDeployConfig)
- if err != nil {
- return fmt.Errorf("failed to renew orderer certificates: %w", err)
- }
-
- return nil
-}
-
// UpdateNodeEnvironment updates the environment variables for a node
func (s *NodeService) UpdateNodeEnvironment(ctx context.Context, nodeID int64, req *types.UpdateNodeEnvRequest) (*types.UpdateNodeEnvResponse, error) {
// Get the node from the database
@@ -2348,6 +1284,15 @@ func (s *NodeService) UpdateNodeEnvironment(ctx context.Context, nodeID int64, r
return nil, fmt.Errorf("failed to get node: %w", err)
}
+ // Create environment update event
+ if err := s.eventService.CreateEvent(ctx, nodeID, NodeEventStarting, map[string]interface{}{
+ "node_id": nodeID,
+ "name": dbNode.Name,
+ "action": "environment_update",
+ }); err != nil {
+ s.logger.Error("Failed to create environment update event", "error", err)
+ }
+
// Get the node's current configuration
switch dbNode.NodeType.String {
case string(types.NodeTypeFabricPeer):
@@ -2364,6 +1309,15 @@ func (s *NodeService) UpdateNodeEnvironment(ctx context.Context, nodeID int64, r
ID: nodeID,
NodeConfig: sql.NullString{String: string(newConfig), Valid: true},
}); err != nil {
+ // Create error event
+ if err := s.eventService.CreateEvent(ctx, nodeID, NodeEventError, map[string]interface{}{
+ "node_id": nodeID,
+ "name": dbNode.Name,
+ "action": "environment_update",
+ "error": err.Error(),
+ }); err != nil {
+ s.logger.Error("Failed to create error event", "error", err)
+ }
return nil, fmt.Errorf("failed to update node config: %w", err)
}
@@ -2381,6 +1335,15 @@ func (s *NodeService) UpdateNodeEnvironment(ctx context.Context, nodeID int64, r
ID: nodeID,
NodeConfig: sql.NullString{String: string(newConfig), Valid: true},
}); err != nil {
+ // Create error event
+ if err := s.eventService.CreateEvent(ctx, nodeID, NodeEventError, map[string]interface{}{
+ "node_id": nodeID,
+ "name": dbNode.Name,
+ "action": "environment_update",
+ "error": err.Error(),
+ }); err != nil {
+ s.logger.Error("Failed to create error event", "error", err)
+ }
return nil, fmt.Errorf("failed to update node config: %w", err)
}
@@ -2388,6 +1351,15 @@ func (s *NodeService) UpdateNodeEnvironment(ctx context.Context, nodeID int64, r
return nil, fmt.Errorf("unsupported node type: %s", dbNode.NodeType.String)
}
+ // Create environment update completed event
+ if err := s.eventService.CreateEvent(ctx, nodeID, NodeEventStarted, map[string]interface{}{
+ "node_id": nodeID,
+ "name": dbNode.Name,
+ "action": "environment_update",
+ }); err != nil {
+ s.logger.Error("Failed to create environment update completed event", "error", err)
+ }
+
// Return the updated environment variables and indicate that a restart is required
return &types.UpdateNodeEnvResponse{
Env: req.Env,
@@ -2424,289 +1396,6 @@ func (s *NodeService) GetNodeEnvironment(ctx context.Context, nodeID int64) (map
}
}
-// UpdateFabricPeer updates a Fabric peer node configuration
-func (s *NodeService) UpdateFabricPeer(ctx context.Context, opts UpdateFabricPeerOpts) (*NodeResponse, error) {
- // Get the node from database
- node, err := s.db.GetNode(ctx, opts.NodeID)
- if err != nil {
- if err == sql.ErrNoRows {
- return nil, errors.NewNotFoundError("peer node not found", nil)
- }
- return nil, fmt.Errorf("failed to get peer node: %w", err)
- }
-
- // Verify node type
- if types.NodeType(node.NodeType.String) != types.NodeTypeFabricPeer {
- return nil, fmt.Errorf("node %d is not a Fabric peer", opts.NodeID)
- }
-
- // Load current config
- nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
- if err != nil {
- return nil, fmt.Errorf("failed to load peer config: %w", err)
- }
-
- peerConfig, ok := nodeConfig.(*types.FabricPeerConfig)
- if !ok {
- return nil, fmt.Errorf("invalid peer config type")
- }
-
- deployConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
- if err != nil {
- return nil, fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
- deployPeerConfig, ok := deployConfig.(*types.FabricPeerDeploymentConfig)
- if !ok {
- return nil, fmt.Errorf("invalid deployment config type")
- }
-
- // Update configuration fields if provided
- if opts.ExternalEndpoint != "" && opts.ExternalEndpoint != peerConfig.ExternalEndpoint {
- peerConfig.ExternalEndpoint = opts.ExternalEndpoint
- }
- if opts.ListenAddress != "" && opts.ListenAddress != peerConfig.ListenAddress {
- if err := s.validateAddress(opts.ListenAddress); err != nil {
- return nil, fmt.Errorf("invalid listen address: %w", err)
- }
- peerConfig.ListenAddress = opts.ListenAddress
- }
- if opts.EventsAddress != "" && opts.EventsAddress != peerConfig.EventsAddress {
- if err := s.validateAddress(opts.EventsAddress); err != nil {
- return nil, fmt.Errorf("invalid events address: %w", err)
- }
- peerConfig.EventsAddress = opts.EventsAddress
- }
- if opts.OperationsListenAddress != "" && opts.OperationsListenAddress != peerConfig.OperationsListenAddress {
- if err := s.validateAddress(opts.OperationsListenAddress); err != nil {
- return nil, fmt.Errorf("invalid operations listen address: %w", err)
- }
- peerConfig.OperationsListenAddress = opts.OperationsListenAddress
- }
- if opts.ChaincodeAddress != "" && opts.ChaincodeAddress != peerConfig.ChaincodeAddress {
- if err := s.validateAddress(opts.ChaincodeAddress); err != nil {
- return nil, fmt.Errorf("invalid chaincode address: %w", err)
- }
- peerConfig.ChaincodeAddress = opts.ChaincodeAddress
- }
- if opts.DomainNames != nil {
- peerConfig.DomainNames = opts.DomainNames
- }
- if opts.Env != nil {
- peerConfig.Env = opts.Env
- }
- if opts.AddressOverrides != nil {
- peerConfig.AddressOverrides = opts.AddressOverrides
- deployPeerConfig.AddressOverrides = opts.AddressOverrides
- }
- if opts.Version != "" {
- peerConfig.Version = opts.Version
- deployPeerConfig.Version = opts.Version
- }
-
- // Validate all addresses together for port conflicts
- if err := s.validateFabricPeerAddresses(peerConfig); err != nil {
- return nil, err
- }
-
- configBytes, err := utils.StoreNodeConfig(nodeConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to store node config: %w", err)
- }
- node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{
- ID: opts.NodeID,
- NodeConfig: sql.NullString{
- String: string(configBytes),
- Valid: true,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("failed to update node config: %w", err)
- }
-
- // Update the deployment config in the database
- deploymentConfigBytes, err := json.Marshal(deployPeerConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal updated deployment config: %w", err)
- }
-
- node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{
- ID: opts.NodeID,
- DeploymentConfig: sql.NullString{
- String: string(deploymentConfigBytes),
- Valid: true,
- },
- })
-
- // Synchronize the peer config
- if err := s.SynchronizePeerConfig(ctx, opts.NodeID); err != nil {
- return nil, fmt.Errorf("failed to synchronize peer config: %w", err)
- }
-
- // Return updated node response
- _, nodeResponse := s.mapDBNodeToServiceNode(node)
- return nodeResponse, nil
-}
-
-// UpdateFabricOrderer updates a Fabric orderer node configuration
-func (s *NodeService) UpdateFabricOrderer(ctx context.Context, opts UpdateFabricOrdererOpts) (*NodeResponse, error) {
- // Get the node from database
- node, err := s.db.GetNode(ctx, opts.NodeID)
- if err != nil {
- if err == sql.ErrNoRows {
- return nil, errors.NewNotFoundError("orderer node not found", nil)
- }
- return nil, fmt.Errorf("failed to get orderer node: %w", err)
- }
-
- // Verify node type
- if types.NodeType(node.NodeType.String) != types.NodeTypeFabricOrderer {
- return nil, fmt.Errorf("node %d is not a Fabric orderer", opts.NodeID)
- }
-
- // Load current config
- nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
- if err != nil {
- return nil, fmt.Errorf("failed to load orderer config: %w", err)
- }
-
- ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig)
- if !ok {
- return nil, fmt.Errorf("invalid orderer config type")
- }
-
- // Load deployment config
- deployOrdererConfig := &types.FabricOrdererDeploymentConfig{}
- if node.DeploymentConfig.Valid {
- deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
- if err != nil {
- return nil, fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
- var ok bool
- deployOrdererConfig, ok = deploymentConfig.(*types.FabricOrdererDeploymentConfig)
- if !ok {
- return nil, fmt.Errorf("invalid orderer deployment config type")
- }
- }
-
- // Update configuration fields if provided
- if opts.ExternalEndpoint != "" && opts.ExternalEndpoint != ordererConfig.ExternalEndpoint {
- ordererConfig.ExternalEndpoint = opts.ExternalEndpoint
- }
- if opts.ListenAddress != "" && opts.ListenAddress != ordererConfig.ListenAddress {
- if err := s.validateAddress(opts.ListenAddress); err != nil {
- return nil, fmt.Errorf("invalid listen address: %w", err)
- }
- ordererConfig.ListenAddress = opts.ListenAddress
- }
- if opts.AdminAddress != "" && opts.AdminAddress != ordererConfig.AdminAddress {
- if err := s.validateAddress(opts.AdminAddress); err != nil {
- return nil, fmt.Errorf("invalid admin address: %w", err)
- }
- ordererConfig.AdminAddress = opts.AdminAddress
- }
- if opts.OperationsListenAddress != "" && opts.OperationsListenAddress != ordererConfig.OperationsListenAddress {
- if err := s.validateAddress(opts.OperationsListenAddress); err != nil {
- return nil, fmt.Errorf("invalid operations listen address: %w", err)
- }
- ordererConfig.OperationsListenAddress = opts.OperationsListenAddress
- }
- if opts.DomainNames != nil {
- ordererConfig.DomainNames = opts.DomainNames
- }
- if opts.Env != nil {
- ordererConfig.Env = opts.Env
- }
- if opts.Version != "" {
- ordererConfig.Version = opts.Version
- deployOrdererConfig.Version = opts.Version
- }
-
- configBytes, err := utils.StoreNodeConfig(nodeConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to store node config: %w", err)
- }
- node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{
- ID: opts.NodeID,
- NodeConfig: sql.NullString{
- String: string(configBytes),
- Valid: true,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("failed to update node config: %w", err)
- }
-
- // Update the deployment config in the database
- deploymentConfigBytes, err := json.Marshal(deployOrdererConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal updated deployment config: %w", err)
- }
-
- node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{
- ID: opts.NodeID,
- DeploymentConfig: sql.NullString{
- String: string(deploymentConfigBytes),
- Valid: true,
- },
- })
-
- // Return updated node response
- _, nodeResponse := s.mapDBNodeToServiceNode(node)
- return nodeResponse, nil
-}
-
-// SynchronizePeerConfig synchronizes the peer's configuration files and service
-func (s *NodeService) SynchronizePeerConfig(ctx context.Context, nodeID int64) error {
- // Get the node from database
- node, err := s.db.GetNode(ctx, nodeID)
- if err != nil {
- return fmt.Errorf("failed to get node: %w", err)
- }
-
- // Verify node type
- if types.NodeType(node.NodeType.String) != types.NodeTypeFabricPeer {
- return fmt.Errorf("node %d is not a Fabric peer", nodeID)
- }
-
- // Load node config
- nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
- if err != nil {
- return fmt.Errorf("failed to load node config: %w", err)
- }
-
- peerConfig, ok := nodeConfig.(*types.FabricPeerConfig)
- if !ok {
- return fmt.Errorf("invalid peer config type")
- }
-
- // Get organization
- org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID)
- if err != nil {
- return fmt.Errorf("failed to get organization: %w", err)
- }
-
- // Get local peer instance
- localPeer := s.getPeerFromConfig(node, org, peerConfig)
-
- // Get deployment config
- deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
- if err != nil {
- return fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
-
- peerDeployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig)
- if !ok {
- return fmt.Errorf("invalid peer deployment config type")
- }
-
- // Synchronize configuration
- if err := localPeer.SynchronizeConfig(peerDeployConfig); err != nil {
- return fmt.Errorf("failed to synchronize peer config: %w", err)
- }
-
- return nil
-}
-
// GetExternalIP returns the external IP address of the node
func (s *NodeService) GetExternalIP() (string, error) {
// Try to get external IP from environment variable first
@@ -2758,166 +1447,3 @@ func (s *NodeService) GetExternalIP() (string, error) {
// Fallback to localhost if no suitable interface is found
return "127.0.0.1", nil
}
-
-// UpdateBesuNodeOpts contains the options for updating a Besu node
-type UpdateBesuNodeRequest struct {
- NetworkID uint `json:"networkId" validate:"required"`
- P2PHost string `json:"p2pHost" validate:"required"`
- P2PPort uint `json:"p2pPort" validate:"required"`
- RPCHost string `json:"rpcHost" validate:"required"`
- RPCPort uint `json:"rpcPort" validate:"required"`
- Bootnodes []string `json:"bootnodes,omitempty"`
- ExternalIP string `json:"externalIp,omitempty"`
- InternalIP string `json:"internalIp,omitempty"`
- Env map[string]string `json:"env,omitempty"`
-}
-
-// UpdateBesuNode updates an existing Besu node configuration
-func (s *NodeService) UpdateBesuNode(ctx context.Context, nodeID int64, req UpdateBesuNodeRequest) (*NodeResponse, error) {
- // Get existing node
- node, err := s.db.GetNode(ctx, nodeID)
- if err != nil {
- if err == sql.ErrNoRows {
- return nil, errors.NewNotFoundError("node not found", nil)
- }
- return nil, fmt.Errorf("failed to get node: %w", err)
- }
-
- // Verify node type
- if types.NodeType(node.NodeType.String) != types.NodeTypeBesuFullnode {
- return nil, errors.NewValidationError("node is not a Besu node", nil)
- }
-
- // Load current config
- nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String))
- if err != nil {
- return nil, fmt.Errorf("failed to load besu config: %w", err)
- }
-
- besuConfig, ok := nodeConfig.(*types.BesuNodeConfig)
- if !ok {
- return nil, fmt.Errorf("invalid besu config type")
- }
-
- // Load deployment config
- deployBesuConfig := &types.BesuNodeDeploymentConfig{}
- if node.DeploymentConfig.Valid {
- deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String)
- if err != nil {
- return nil, fmt.Errorf("failed to deserialize deployment config: %w", err)
- }
- var ok bool
- deployBesuConfig, ok = deploymentConfig.(*types.BesuNodeDeploymentConfig)
- if !ok {
- return nil, fmt.Errorf("invalid besu deployment config type")
- }
- }
-
- // Update configuration fields
- besuConfig.NetworkID = int64(req.NetworkID)
- besuConfig.P2PPort = req.P2PPort
- besuConfig.RPCPort = req.RPCPort
- besuConfig.P2PHost = req.P2PHost
- besuConfig.RPCHost = req.RPCHost
- deployBesuConfig.NetworkID = int64(req.NetworkID)
- deployBesuConfig.P2PPort = req.P2PPort
- deployBesuConfig.RPCPort = req.RPCPort
- deployBesuConfig.P2PHost = req.P2PHost
- deployBesuConfig.RPCHost = req.RPCHost
- if req.Bootnodes != nil {
- besuConfig.BootNodes = req.Bootnodes
- }
-
- if req.ExternalIP != "" {
- besuConfig.ExternalIP = req.ExternalIP
- deployBesuConfig.ExternalIP = req.ExternalIP
- }
- if req.InternalIP != "" {
- besuConfig.InternalIP = req.InternalIP
- deployBesuConfig.InternalIP = req.InternalIP
- }
-
- // Update environment variables
- if req.Env != nil {
- besuConfig.Env = req.Env
- deployBesuConfig.Env = req.Env
- }
-
- // Get the key to update the enodeURL
- key, err := s.keymanagementService.GetKey(ctx, int(besuConfig.KeyID))
- if err != nil {
- return nil, fmt.Errorf("failed to get key: %w", err)
- }
-
- // Update enodeURL based on the public key, external IP and P2P port
- if key.PublicKey != "" {
- publicKey := key.PublicKey[2:]
- deployBesuConfig.EnodeURL = fmt.Sprintf("enode://%s@%s:%d", publicKey, besuConfig.ExternalIP, besuConfig.P2PPort)
- }
-
- // Store updated node config
- configBytes, err := utils.StoreNodeConfig(besuConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to store node config: %w", err)
- }
-
- node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{
- ID: nodeID,
- NodeConfig: sql.NullString{
- String: string(configBytes),
- Valid: true,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("failed to update node config: %w", err)
- }
-
- // Update deployment config
- deploymentConfigBytes, err := json.Marshal(deployBesuConfig)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal deployment config: %w", err)
- }
-
- node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{
- ID: nodeID,
- DeploymentConfig: sql.NullString{
- String: string(deploymentConfigBytes),
- Valid: true,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("failed to update deployment config: %w", err)
- }
-
- // Return updated node
- _, nodeResponse := s.mapDBNodeToServiceNode(node)
- return nodeResponse, nil
-}
-
-// validateBesuConfig validates the Besu node configuration
-func (s *NodeService) validateBesuConfig(config *types.BesuNodeConfig) error {
-
- if config.P2PPort == 0 {
- return fmt.Errorf("p2p port is required")
- }
- if config.RPCPort == 0 {
- return fmt.Errorf("rpc port is required")
- }
- if config.NetworkID == 0 {
- return fmt.Errorf("network ID is required")
- }
- if config.P2PHost == "" {
- return fmt.Errorf("p2p host is required")
- }
- if config.RPCHost == "" {
- return fmt.Errorf("rpc host is required")
- }
- if config.ExternalIP == "" {
- return fmt.Errorf("external IP is required")
- }
- if config.InternalIP == "" {
- return fmt.Errorf("internal IP is required")
- }
-
- return nil
-}
diff --git a/pkg/nodes/types/deployment.go b/pkg/nodes/types/deployment.go
index cab3408..8b4c2e0 100644
--- a/pkg/nodes/types/deployment.go
+++ b/pkg/nodes/types/deployment.go
@@ -226,6 +226,12 @@ type BesuNodeDeploymentConfig struct {
NetworkID int64 `json:"networkId" validate:"required" example:"1337"`
// @Description Enode URL for node discovery
EnodeURL string `json:"enodeUrl" example:"enode://pubkey@172.16.1.10:30303"`
+ // @Description Metrics port for Prometheus metrics
+ MetricsPort int64 `json:"metricsPort" validate:"required" example:"9545"`
+ // @Description Whether metrics are enabled
+ MetricsEnabled bool `json:"metricsEnabled" example:"true"`
+ // @Description Metrics protocol (e.g. PROMETHEUS)
+ MetricsProtocol string `json:"metricsProtocol" validate:"required" example:"PROMETHEUS"`
}
func (c *BesuNodeDeploymentConfig) GetMode() string { return c.Mode }
@@ -324,16 +330,20 @@ type FabricOrdererConfig struct {
// BesuNodeConfig represents the parameters needed to create a Besu node
type BesuNodeConfig struct {
BaseNodeConfig
- NetworkID int64 `json:"networkId" validate:"required"`
- KeyID int64 `json:"keyId" validate:"required"`
- P2PPort uint `json:"p2pPort" validate:"required"`
- RPCPort uint `json:"rpcPort" validate:"required"`
- P2PHost string `json:"p2pHost" validate:"required"`
- RPCHost string `json:"rpcHost" validate:"required"`
- ExternalIP string `json:"externalIp" validate:"required"`
- InternalIP string `json:"internalIp" validate:"required"`
- Env map[string]string `json:"env,omitempty"`
- BootNodes []string `json:"bootNodes,omitempty"`
+ NetworkID int64 `json:"networkId" validate:"required"`
+ KeyID int64 `json:"keyId" validate:"required"`
+ P2PPort uint `json:"p2pPort" validate:"required"`
+ RPCPort uint `json:"rpcPort" validate:"required"`
+ P2PHost string `json:"p2pHost" validate:"required"`
+ RPCHost string `json:"rpcHost" validate:"required"`
+ ExternalIP string `json:"externalIp" validate:"required"`
+ InternalIP string `json:"internalIp" validate:"required"`
+ Env map[string]string `json:"env,omitempty"`
+ BootNodes []string `json:"bootNodes,omitempty"`
+ MetricsEnabled bool `json:"metricsEnabled"`
+ MetricsPort int64 `json:"metricsPort"`
+ MetricsProtocol string `json:"metricsProtocol"`
+ Version string `json:"version"`
}
// Add this new type for storage
diff --git a/pkg/plugin/handler.go b/pkg/plugin/handler.go
index ab8581e..1f6ae8a 100644
--- a/pkg/plugin/handler.go
+++ b/pkg/plugin/handler.go
@@ -49,6 +49,7 @@ func (h *Handler) RegisterRoutes(r chi.Router) {
r.Delete("/", response.Middleware(h.deletePlugin))
r.Post("/deploy", response.Middleware(h.deployPlugin))
r.Post("/stop", response.Middleware(h.stopPlugin))
+ r.Post("/resume", response.Middleware(h.resumePlugin))
r.Get("/status", response.Middleware(h.getPluginStatus))
r.Get("/deployment-status", response.Middleware(h.getDeploymentStatus))
r.Get("/services", response.Middleware(h.getDockerComposeServices))
@@ -318,29 +319,25 @@ func (h *Handler) deployPlugin(w http.ResponseWriter, r *http.Request) error {
}
}
- // Create deployment metadata
- deploymentMetadata := map[string]interface{}{
- "parameters": parameters,
- "project_name": plugin.Metadata.Name + "-" + generateRandomSuffix(),
- "created_at": time.Now().UTC(),
- }
-
- if err := h.store.UpdateDeploymentMetadata(r.Context(), name, deploymentMetadata); err != nil {
- return errors.NewInternalError("failed to save deployment metadata", err, nil)
- }
-
- if err := h.store.UpdateDeploymentStatus(r.Context(), name, "deploying"); err != nil {
- return errors.NewInternalError("failed to update deployment status", err, nil)
- }
-
+ // Deploy plugin (x-source validation is handled in DeployPlugin)
if err := h.pm.DeployPlugin(r.Context(), plugin, parameters, h.store); err != nil {
+ if strings.Contains(err.Error(), "x-source parameter validation failed") {
+ return errors.NewValidationError("invalid x-source parameter", map[string]interface{}{
+ "detail": err.Error(),
+ "code": "INVALID_XSOURCE_PARAMETER",
+ })
+ }
_ = h.store.UpdateDeploymentStatus(r.Context(), name, "failed")
return errors.NewInternalError("failed to deploy plugin", err, nil)
}
return response.WriteJSON(w, http.StatusOK, map[string]interface{}{
- "status": "deploying",
- "metadata": deploymentMetadata,
+ "status": "deploying",
+ "metadata": map[string]interface{}{
+ "parameters": parameters,
+ "project_name": plugin.Metadata.Name + "-" + generateRandomSuffix(),
+ "created_at": time.Now().UTC(),
+ },
})
}
@@ -483,3 +480,36 @@ func (h *Handler) getDockerComposeServices(w http.ResponseWriter, r *http.Reques
return response.WriteJSON(w, http.StatusOK, services)
}
+
+// @Summary Resume a plugin deployment
+// @Description Resume a previously deployed plugin
+// @Tags Plugins
+// @Accept json
+// @Produce json
+// @Param name path string true "Plugin name"
+// @Success 200 {object} map[string]string
+// @Failure 404 {object} response.Response
+// @Failure 500 {object} response.Response
+// @Router /plugins/{name}/resume [post]
+func (h *Handler) resumePlugin(w http.ResponseWriter, r *http.Request) error {
+ name := chi.URLParam(r, "name")
+ plugin, err := h.store.GetPlugin(r.Context(), name)
+ if err != nil {
+ if strings.Contains(err.Error(), "not found") {
+ return errors.NewNotFoundError("plugin not found", map[string]interface{}{
+ "detail": "The requested plugin does not exist",
+ "code": "PLUGIN_NOT_FOUND",
+ "plugin_name": name,
+ })
+ }
+ return errors.NewInternalError("failed to get plugin", err, nil)
+ }
+
+ if err := h.pm.ResumePlugin(r.Context(), plugin, h.store); err != nil {
+ return errors.NewInternalError("failed to resume plugin", err, nil)
+ }
+
+ return response.WriteJSON(w, http.StatusOK, map[string]string{
+ "status": "resumed",
+ })
+}
diff --git a/pkg/plugin/plugin.go b/pkg/plugin/plugin.go
index c63f24d..683e3a7 100644
--- a/pkg/plugin/plugin.go
+++ b/pkg/plugin/plugin.go
@@ -2,31 +2,44 @@ package plugin
import (
"context"
+ "encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
+ "github.com/chainlaunch/chainlaunch/pkg/logger"
plugintypes "github.com/chainlaunch/chainlaunch/pkg/plugin/types"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/flags"
cmdCompose "github.com/docker/compose/v2/cmd/compose"
"github.com/docker/compose/v2/pkg/api"
+ "bytes"
+ "text/template"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ key "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service"
+ nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service"
+ "github.com/chainlaunch/chainlaunch/pkg/plugin/xsource"
"github.com/docker/compose/v2/pkg/compose"
"gopkg.in/yaml.v3"
)
// PluginManager handles plugin operations
type PluginManager struct {
- pluginsDir string
- compose api.Service
- dockerCli *command.DockerCli
+ pluginsDir string
+ compose api.Service
+ dockerCli *command.DockerCli
+ db *db.Queries
+ nodeService *nodeservice.NodeService
+ keyManagement *key.KeyManagementService
+ logger *logger.Logger
}
// NewPluginManager creates a new plugin manager
-func NewPluginManager(pluginsDir string) (*PluginManager, error) {
+func NewPluginManager(pluginsDir string, db *db.Queries, nodeService *nodeservice.NodeService, keyManagement *key.KeyManagementService, logger *logger.Logger) (*PluginManager, error) {
dockerCli, err := command.NewDockerCli()
if err != nil {
return nil, fmt.Errorf("failed to create Docker CLI: %w", err)
@@ -47,9 +60,13 @@ func NewPluginManager(pluginsDir string) (*PluginManager, error) {
composeService := compose.NewComposeService(dockerCli)
return &PluginManager{
- pluginsDir: pluginsDir,
- compose: composeService,
- dockerCli: dockerCli,
+ pluginsDir: pluginsDir,
+ compose: composeService,
+ dockerCli: dockerCli,
+ db: db,
+ nodeService: nodeService,
+ keyManagement: keyManagement,
+ logger: logger,
}, nil
}
@@ -68,8 +85,121 @@ func (pm *PluginManager) LoadPlugin(filePath string) (*plugintypes.Plugin, error
return &plugin, nil
}
+// validateXSourceParameters validates x-source parameters using the store's fetchers
+func validateXSourceParameters(ctx context.Context, plugin *plugintypes.Plugin, parameters map[string]interface{}, db *db.Queries, nodeService *nodeservice.NodeService, keyManagement *key.KeyManagementService) error {
+ // Marshal the plugin's parameters schema to JSON
+ schemaJSON, err := json.Marshal(plugin.Spec.Parameters)
+ if err != nil {
+ return fmt.Errorf("failed to marshal parameters schema: %w", err)
+ }
+
+ // Extract x-source fields from the schema
+ var schemaData map[string]interface{}
+ if err := json.Unmarshal(schemaJSON, &schemaData); err != nil {
+ return fmt.Errorf("failed to unmarshal parameters schema: %w", err)
+ }
+
+ // Create x-source registry
+ registry := xsource.NewRegistry(db, nodeService, keyManagement)
+
+ // Get properties from schema
+ properties, ok := schemaData["properties"].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("invalid schema: properties not found")
+ }
+
+ // Validate each parameter
+ for name, prop := range properties {
+ propMap, ok := prop.(map[string]interface{})
+ if !ok {
+ continue
+ }
+
+ // Check if this is an x-source parameter
+ xSourceType, ok := propMap["x-source"].(string)
+ if !ok {
+ continue
+ }
+
+ // Check if parameter is required
+ required, _ := propMap["required"].(bool)
+ value, exists := parameters[name]
+
+ if required && !exists {
+ return fmt.Errorf("required parameter '%s' is missing", name)
+ }
+
+ if exists {
+ // Validate and process the parameter using the registry
+ _, err := registry.ValidateAndProcess(ctx, xsource.XSourceType(xSourceType), name, value)
+ if err != nil {
+ return fmt.Errorf("invalid value for parameter '%s': %w", name, err)
+ }
+ }
+ }
+
+ return nil
+}
+
+// processXSourceParameters processes parameters that have x-source specifications
+func (pm *PluginManager) processXSourceParameters(ctx context.Context, plugin *plugintypes.Plugin, parameters map[string]interface{}) (map[string]interface{}, []xsource.VolumeMount, error) {
+ processedParameters := make(map[string]interface{})
+ var volumeMounts []xsource.VolumeMount
+
+ // Create x-source registry
+ registry := xsource.NewRegistry(pm.db, pm.nodeService, pm.keyManagement)
+
+ for key, value := range parameters {
+ // Check if this parameter has an x-source specification
+ if spec, ok := plugin.Spec.Parameters.Properties[key]; ok && spec.XSource != "" {
+ // Get the handler for this x-source type
+ handler, err := registry.GetHandler(xsource.XSourceType(spec.XSource))
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to get handler for x-source type %s: %w", spec.XSource, err)
+ }
+
+ // Create the x-source value
+ xsourceValue, err := handler.CreateValue(key, value)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create x-source value for %s: %w", key, err)
+ }
+
+ // Validate the value
+ if err := xsourceValue.Validate(ctx); err != nil {
+ return nil, nil, fmt.Errorf("invalid x-source value for %s: %w", key, err)
+ }
+
+ // Get the processed value for templates
+ processedValue, err := xsourceValue.GetValue(ctx)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to get x-source value for %s: %w", key, err)
+ }
+
+ // Get volume mounts
+ mounts, err := xsourceValue.GetVolumeMounts(ctx)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to get volume mounts for %s: %w", key, err)
+ }
+ volumeMounts = append(volumeMounts, mounts...)
+
+ processedParameters[key] = processedValue
+ } else {
+ // For non-x-source parameters, keep as is
+ processedParameters[key] = value
+ }
+ }
+
+ return processedParameters, volumeMounts, nil
+}
+
// DeployPlugin deploys a plugin using docker-compose
func (pm *PluginManager) DeployPlugin(ctx context.Context, plugin *plugintypes.Plugin, parameters map[string]interface{}, store Store) error {
+ // Validate x-source parameters before deployment
+ if err := validateXSourceParameters(ctx, plugin, parameters, pm.db, pm.nodeService, pm.keyManagement); err != nil {
+ _ = store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "failed")
+ return fmt.Errorf("x-source parameter validation failed: %w", err)
+ }
+
// Update plugin status to deploying
if err := store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "deploying"); err != nil {
return fmt.Errorf("failed to update deployment status: %w", err)
@@ -84,9 +214,38 @@ func (pm *PluginManager) DeployPlugin(ctx context.Context, plugin *plugintypes.P
}
defer os.RemoveAll(tempDir)
- // Write the docker-compose contents to a file
+ // Process x-source parameters and fetch complete details
+ processedParameters, volumeMounts, err := pm.processXSourceParameters(ctx, plugin, parameters)
+ if err != nil {
+ _ = store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "failed")
+ return fmt.Errorf("failed to process x-source parameters: %w", err)
+ }
+
+ pm.logger.Infof("Processed parameters: %v", processedParameters)
+ pm.logger.Infof("Volume mounts: %v", volumeMounts)
+
+ // Add volume mounts to the template data
+ templateData := map[string]interface{}{
+ "parameters": processedParameters,
+ "volumeMounts": volumeMounts,
+ }
+
+ // Render the docker-compose contents as a Go template
+ var renderedCompose bytes.Buffer
+ tmpl, err := template.New("docker-compose").Parse(plugin.Spec.DockerCompose.Contents)
+ if err != nil {
+ _ = store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "failed")
+ return fmt.Errorf("failed to parse docker-compose template: %w", err)
+ }
+
+ if err := tmpl.Execute(&renderedCompose, templateData); err != nil {
+ _ = store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "failed")
+ return fmt.Errorf("failed to render docker-compose template: %w", err)
+ }
+
+ // Write the rendered docker-compose contents to a file
composePath := filepath.Join(tempDir, "docker-compose.yml")
- if err := os.WriteFile(composePath, []byte(plugin.Spec.DockerCompose.Contents), 0644); err != nil {
+ if err := os.WriteFile(composePath, renderedCompose.Bytes(), 0644); err != nil {
// Update status to failed
_ = store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "failed")
return fmt.Errorf("failed to write docker-compose file: %w", err)
@@ -94,7 +253,7 @@ func (pm *PluginManager) DeployPlugin(ctx context.Context, plugin *plugintypes.P
// Create environment variables file
envVars := make(map[string]string)
- for name, value := range parameters {
+ for name, value := range processedParameters {
if strValue, ok := value.(string); ok {
envVars[name] = strValue
}
@@ -215,6 +374,24 @@ func (pm *PluginManager) ValidatePlugin(plugin *plugintypes.Plugin) error {
// StopPlugin stops a running plugin deployment
func (pm *PluginManager) StopPlugin(ctx context.Context, plugin *plugintypes.Plugin, store Store) error {
+ // Get the deployment metadata to retrieve the original parameters
+ deploymentMetadata, err := store.GetDeploymentMetadata(ctx, plugin.Metadata.Name)
+ if err != nil {
+ return fmt.Errorf("failed to get deployment metadata: %w", err)
+ }
+
+ // Extract parameters from metadata
+ parameters, ok := deploymentMetadata["parameters"].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("invalid deployment metadata: parameters not found")
+ }
+
+ // Process x-source parameters to get volume mounts
+ processedParameters, volumeMounts, err := pm.processXSourceParameters(ctx, plugin, parameters)
+ if err != nil {
+ return fmt.Errorf("failed to process x-source parameters: %w", err)
+ }
+
// Create a temporary directory for the plugin
tempDir, err := os.MkdirTemp("", plugin.Metadata.Name)
if err != nil {
@@ -222,9 +399,26 @@ func (pm *PluginManager) StopPlugin(ctx context.Context, plugin *plugintypes.Plu
}
defer os.RemoveAll(tempDir)
- // Write the docker-compose contents to a file
+ // Add volume mounts to the template data
+ templateData := map[string]interface{}{
+ "parameters": processedParameters,
+ "volumeMounts": volumeMounts,
+ }
+
+ // Render the docker-compose contents as a Go template
+ var renderedCompose bytes.Buffer
+ tmpl, err := template.New("docker-compose").Parse(plugin.Spec.DockerCompose.Contents)
+ if err != nil {
+ return fmt.Errorf("failed to parse docker-compose template: %w", err)
+ }
+
+ if err := tmpl.Execute(&renderedCompose, templateData); err != nil {
+ return fmt.Errorf("failed to render docker-compose template: %w", err)
+ }
+
+ // Write the rendered docker-compose contents to a file
composePath := filepath.Join(tempDir, "docker-compose.yml")
- if err := os.WriteFile(composePath, []byte(plugin.Spec.DockerCompose.Contents), 0644); err != nil {
+ if err := os.WriteFile(composePath, renderedCompose.Bytes(), 0644); err != nil {
return fmt.Errorf("failed to write docker-compose file: %w", err)
}
@@ -250,6 +444,13 @@ func (pm *PluginManager) StopPlugin(ctx context.Context, plugin *plugintypes.Plu
return fmt.Errorf("failed to stop project: %w", err)
}
+ // Clean up volume mounts
+ for _, mount := range volumeMounts {
+ if err := os.RemoveAll(mount.Source); err != nil {
+ pm.logger.Warnf("Failed to clean up volume mount %s: %v", mount.Source, err)
+ }
+ }
+
if err := store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "stopped"); err != nil {
return fmt.Errorf("failed to store plugin status: %w", err)
}
@@ -348,8 +549,24 @@ func (pm *PluginManager) GetDeploymentStatus(ctx context.Context, plugin *plugin
// GetDockerComposeServices retrieves all services with their current status
func (pm *PluginManager) GetDockerComposeServices(ctx context.Context, plugin *plugintypes.Plugin, store Store) ([]ServiceStatus, error) {
- // Get deployment metadata to get the project name
- projectName := plugin.Metadata.Name
+ // Get deployment metadata to get the project name and parameters
+ deploymentMetadata, err := store.GetDeploymentMetadata(ctx, plugin.Metadata.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get deployment metadata: %w", err)
+ }
+
+ // Extract parameters from metadata
+ parameters, ok := deploymentMetadata["parameters"].(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("invalid deployment metadata: parameters not found")
+ }
+
+ // Process x-source parameters to get volume mounts
+ processedParameters, volumeMounts, err := pm.processXSourceParameters(ctx, plugin, parameters)
+ if err != nil {
+ return nil, fmt.Errorf("failed to process x-source parameters: %w", err)
+ }
+
// Create a temporary directory for the plugin
tempDir, err := os.MkdirTemp("", plugin.Metadata.Name)
if err != nil {
@@ -357,14 +574,31 @@ func (pm *PluginManager) GetDockerComposeServices(ctx context.Context, plugin *p
}
defer os.RemoveAll(tempDir)
- // Write the docker-compose contents to a file
+ // Add volume mounts to the template data
+ templateData := map[string]interface{}{
+ "parameters": processedParameters,
+ "volumeMounts": volumeMounts,
+ }
+
+ // Render the docker-compose contents as a Go template
+ var renderedCompose bytes.Buffer
+ tmpl, err := template.New("docker-compose").Parse(plugin.Spec.DockerCompose.Contents)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse docker-compose template: %w", err)
+ }
+
+ if err := tmpl.Execute(&renderedCompose, templateData); err != nil {
+ return nil, fmt.Errorf("failed to render docker-compose template: %w", err)
+ }
+
+ // Write the rendered docker-compose contents to a file
composePath := filepath.Join(tempDir, "docker-compose.yml")
- if err := os.WriteFile(composePath, []byte(plugin.Spec.DockerCompose.Contents), 0644); err != nil {
+ if err := os.WriteFile(composePath, renderedCompose.Bytes(), 0644); err != nil {
return nil, fmt.Errorf("failed to write docker-compose file: %w", err)
}
projectOptions := cmdCompose.ProjectOptions{
- ProjectName: projectName,
+ ProjectName: plugin.Metadata.Name,
ConfigPaths: []string{composePath},
}
@@ -451,3 +685,94 @@ func (pm *PluginManager) GetDockerComposeServices(ctx context.Context, plugin *p
return serviceStatuses, nil
}
+
+// ResumePlugin resumes a previously deployed plugin
+//
+// The original deployment parameters are recovered from the stored deployment
+// metadata (under the "parameters" key), re-run through x-source processing,
+// and used to render the plugin's docker-compose Go template. The rendered
+// compose file is brought up via compose Up. Status transitions:
+// "deploying" -> "deployed" on success, "failed" if Up errors.
+func (pm *PluginManager) ResumePlugin(ctx context.Context, plugin *plugintypes.Plugin, store Store) error {
+ // Get the deployment metadata to retrieve the original parameters
+ deploymentMetadata, err := store.GetDeploymentMetadata(ctx, plugin.Metadata.Name)
+ if err != nil {
+ return fmt.Errorf("failed to get deployment metadata: %w", err)
+ }
+
+ // Extract parameters from metadata
+ parameters, ok := deploymentMetadata["parameters"].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("invalid deployment metadata: parameters not found")
+ }
+
+ // Process x-source parameters to get volume mounts
+ processedParameters, volumeMounts, err := pm.processXSourceParameters(ctx, plugin, parameters)
+ if err != nil {
+ return fmt.Errorf("failed to process x-source parameters: %w", err)
+ }
+
+ // Create a temporary directory for the plugin
+ tempDir, err := os.MkdirTemp("", plugin.Metadata.Name)
+ if err != nil {
+ return fmt.Errorf("failed to create temporary directory: %w", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ // Add volume mounts to the template data
+ templateData := map[string]interface{}{
+ "parameters": processedParameters,
+ "volumeMounts": volumeMounts,
+ }
+
+ // Render the docker-compose contents as a Go template
+ var renderedCompose bytes.Buffer
+ tmpl, err := template.New("docker-compose").Parse(plugin.Spec.DockerCompose.Contents)
+ if err != nil {
+ return fmt.Errorf("failed to parse docker-compose template: %w", err)
+ }
+
+ if err := tmpl.Execute(&renderedCompose, templateData); err != nil {
+ return fmt.Errorf("failed to render docker-compose template: %w", err)
+ }
+
+ // Write the rendered docker-compose contents to a file
+ composePath := filepath.Join(tempDir, "docker-compose.yml")
+ if err := os.WriteFile(composePath, renderedCompose.Bytes(), 0644); err != nil {
+ return fmt.Errorf("failed to write docker-compose file: %w", err)
+ }
+
+ projectOptions := cmdCompose.ProjectOptions{
+ ProjectName: plugin.Metadata.Name,
+ ConfigPaths: []string{composePath},
+ }
+
+ // Turn projectOptions into a project with default values
+ projectType, _, err := projectOptions.ToProject(ctx, pm.dockerCli, []string{})
+ if err != nil {
+ return err
+ }
+
+ // Update plugin status to deploying
+ if err := store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "deploying"); err != nil {
+ return fmt.Errorf("failed to update deployment status: %w", err)
+ }
+
+ upOptions := api.UpOptions{
+ Create: api.CreateOptions{
+ RemoveOrphans: true,
+ QuietPull: true,
+ },
+ Start: api.StartOptions{
+ // Wait: block until the services report started/healthy
+ Wait: true,
+ },
+ }
+
+ // Start the project
+ err = pm.compose.Up(ctx, projectType, upOptions)
+ if err != nil {
+ _ = store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "failed")
+ return fmt.Errorf("failed to start project: %w", err)
+ }
+
+ // Update status to deployed
+ if err := store.UpdateDeploymentStatus(ctx, plugin.Metadata.Name, "deployed"); err != nil {
+ return fmt.Errorf("failed to update deployment status: %w", err)
+ }
+
+ return nil
+}
diff --git a/pkg/plugin/store.go b/pkg/plugin/store.go
index 2f5641f..3328732 100644
--- a/pkg/plugin/store.go
+++ b/pkg/plugin/store.go
@@ -5,9 +5,11 @@ import (
"database/sql"
"encoding/json"
"fmt"
+ "strconv"
"time"
"github.com/chainlaunch/chainlaunch/pkg/db"
+ nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service"
"github.com/chainlaunch/chainlaunch/pkg/plugin/types"
)
@@ -22,6 +24,16 @@ type Store interface {
UpdateDeploymentStatus(ctx context.Context, name string, status string) error
GetDeploymentMetadata(ctx context.Context, name string) (map[string]interface{}, error)
GetDeploymentStatus(ctx context.Context, name string) (string, error)
+ ListKeyStoreIDs(ctx context.Context) ([]string, error)
+ ListFabricOrgs(ctx context.Context) ([]string, error)
+ ListKeyStoreOptions(ctx context.Context) ([]types.OptionItem, error)
+ ListFabricOrgOptions(ctx context.Context) ([]types.OptionItem, error)
+ ListFabricKeyOptions(ctx context.Context) ([]types.OptionItem, error)
+ // New methods for fetching details
+ GetFabricPeerDetails(ctx context.Context, id string) (*types.FabricPeerDetails, error)
+ GetFabricOrgDetails(ctx context.Context, id string) (*types.FabricOrgDetails, error)
+ GetKeyDetails(ctx context.Context, id string) (*types.KeyDetails, error)
+ GetFabricKeyDetails(ctx context.Context, keyIdString string, orgIdString string) (*types.FabricKeyDetails, error)
}
// Service represents a docker-compose service
@@ -49,13 +61,15 @@ type ServiceStatus struct {
// SQLStore implements the Store interface using SQL database
type SQLStore struct {
- queries *db.Queries
+ queries *db.Queries
+ nodeService *nodeservice.NodeService
}
// NewSQLStore creates a new SQL store
-func NewSQLStore(db *db.Queries) *SQLStore {
+func NewSQLStore(db *db.Queries, nodeService *nodeservice.NodeService) *SQLStore {
return &SQLStore{
- queries: db,
+ queries: db,
+ nodeService: nodeService,
}
}
@@ -335,3 +349,174 @@ func (s *SQLStore) GetDeploymentStatus(ctx context.Context, name string) (string
}
return status.String, nil
}
+
+// ListKeyStoreIDs fetches valid key IDs for x-source validation
+//
+// NOTE(review): placeholder — returns hardcoded IDs until the DB-backed
+// query below is implemented; callers relying on this for validation will
+// accept only "key1"/"key2".
+func (s *SQLStore) ListKeyStoreIDs(ctx context.Context) ([]string, error) {
+ // TODO: Query your DB for available key IDs
+ return []string{"key1", "key2"}, nil
+}
+
+// ListFabricOrgs fetches valid Fabric orgs for x-source validation
+//
+// NOTE(review): placeholder — returns hardcoded org names until the DB-backed
+// query below is implemented.
+func (s *SQLStore) ListFabricOrgs(ctx context.Context) ([]string, error) {
+ // TODO: Query your DB for available Fabric orgs
+ return []string{"orga", "orgb"}, nil
+}
+
+// ListKeyStoreOptions returns stored keys as label/value options: the key
+// name as the label and the decimal key ID as the value.
+// NOTE(review): results are capped at the first 100 keys (Limit: 100,
+// Offset: 0) — confirm that is sufficient for the UI.
+func (s *SQLStore) ListKeyStoreOptions(ctx context.Context) ([]types.OptionItem, error) {
+ rows, err := s.queries.ListKeys(ctx, &db.ListKeysParams{Limit: 100, Offset: 0})
+ if err != nil {
+ return nil, err
+ }
+ opts := make([]types.OptionItem, len(rows))
+ for i, row := range rows {
+ opts[i] = types.OptionItem{
+ Label: row.Name, // Show key name as label
+ Value: fmt.Sprintf("%d", row.ID), // Use key ID as value
+ }
+ }
+ return opts, nil
+}
+
+// ListFabricOrgOptions returns Fabric organizations as label/value options:
+// the MSP ID as the label and the decimal org ID as the value.
+func (s *SQLStore) ListFabricOrgOptions(ctx context.Context) ([]types.OptionItem, error) {
+ rows, err := s.queries.ListFabricOrganizations(ctx)
+ if err != nil {
+ return nil, err
+ }
+ opts := make([]types.OptionItem, len(rows))
+ for i, row := range rows {
+ opts[i] = types.OptionItem{
+ Label: row.MspID, // Show MSP ID as label (or row.Description.String if you want description)
+ Value: fmt.Sprintf("%d", row.ID), // Use org ID as value
+ }
+ }
+ return opts, nil
+}
+
+// GetFabricPeerDetails retrieves details for a Fabric peer
+//
+// id is the node ID as a decimal string. Returns an error if the ID does not
+// parse, the node lookup fails, or the node is not a Fabric peer
+// (peer.FabricPeer == nil).
+func (s *SQLStore) GetFabricPeerDetails(ctx context.Context, id string) (*types.FabricPeerDetails, error) {
+ peerID, err := strconv.ParseInt(id, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid peer ID format: %w", err)
+ }
+
+ peer, err := s.nodeService.GetNode(ctx, peerID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get peer: %w", err)
+ }
+ if peer.FabricPeer == nil {
+ return nil, fmt.Errorf("peer is not a Fabric peer")
+ }
+
+ return &types.FabricPeerDetails{
+ ID: id,
+ Name: peer.Name,
+ ExternalEndpoint: peer.FabricPeer.ExternalEndpoint,
+ TLSCert: peer.FabricPeer.TLSCert,
+ MspID: peer.FabricPeer.MSPID,
+ OrgID: peer.FabricPeer.OrganizationID,
+ }, nil
+}
+
+// GetFabricOrgDetails retrieves details for a Fabric org
+//
+// id is the org ID as a decimal string. Description is flattened from the
+// nullable DB column (empty string when NULL).
+func (s *SQLStore) GetFabricOrgDetails(ctx context.Context, id string) (*types.FabricOrgDetails, error) {
+ orgID, err := strconv.ParseInt(id, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid org ID format: %w", err)
+ }
+
+ org, err := s.queries.GetFabricOrganization(ctx, orgID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get org: %w", err)
+ }
+
+ return &types.FabricOrgDetails{
+ ID: orgID,
+ MspID: org.MspID,
+ Description: org.Description.String,
+ }, nil
+}
+
+// GetKeyDetails retrieves details for a key
+//
+// id is the key ID as a decimal string. Type is taken from the key's
+// Algorithm column; Description is empty when the DB column is NULL.
+func (s *SQLStore) GetKeyDetails(ctx context.Context, id string) (*types.KeyDetails, error) {
+ keyID, err := strconv.ParseInt(id, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid key ID format: %w", err)
+ }
+
+ key, err := s.queries.GetKey(ctx, keyID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get key: %w", err)
+ }
+
+ return &types.KeyDetails{
+ ID: keyID,
+ Name: key.Name,
+ Type: key.Algorithm,
+ Description: key.Description.String,
+ }, nil
+}
+
+// GetFabricKeyDetails retrieves details for a Fabric key
+//
+// keyIdString and orgIdString are decimal-encoded IDs (the two halves of the
+// "<keyID>:<orgID>" option value produced by ListFabricKeyOptions). The
+// returned details combine the key row (name, algorithm, certificate) with
+// the organization's MSP ID. Nullable columns flatten to empty strings.
+func (s *SQLStore) GetFabricKeyDetails(ctx context.Context, keyIdString string, orgIdString string) (*types.FabricKeyDetails, error) {
+ keyID, err := strconv.ParseInt(keyIdString, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid key ID format: %w", err)
+ }
+ orgID, err := strconv.ParseInt(orgIdString, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid org ID format: %w", err)
+ }
+ key, err := s.queries.GetKey(ctx, keyID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get key: %w", err)
+ }
+
+ // Get the organization details for this key
+ org, err := s.queries.GetFabricOrganization(ctx, orgID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get organization for key: %w", err)
+ }
+
+ return &types.FabricKeyDetails{
+ KeyID: keyID,
+ Name: key.Name,
+ Type: key.Algorithm,
+ Description: key.Description.String,
+ MspID: org.MspID,
+ Certificate: key.Certificate.String,
+ }, nil
+}
+
+// ListFabricKeyOptions builds selectable key/org combinations for the
+// fabric-key x-source type. Each option pairs a key that has a certificate
+// with every known Fabric organization; the label is "<key name> (<MSP ID>)"
+// and the value encodes both IDs as "<keyID>:<orgID>".
+func (s *SQLStore) ListFabricKeyOptions(ctx context.Context) ([]types.OptionItem, error) {
+ // Get all keys that have certificates (which are likely to be Fabric keys)
+ rows, err := s.queries.ListKeys(ctx, &db.ListKeysParams{Limit: 100, Offset: 0})
+ if err != nil {
+ return nil, err
+ }
+
+ // Get all Fabric organizations
+ orgs, err := s.queries.ListFabricOrganizations(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create options for each key-org combination. Iterate the orgs slice
+ // directly rather than an intermediate map: Go map iteration order is
+ // randomized, which previously made the option order differ on every call.
+ var opts []types.OptionItem
+ for _, key := range rows {
+ if !key.Certificate.Valid {
+ continue
+ }
+ for _, org := range orgs {
+ opts = append(opts, types.OptionItem{
+ Label: fmt.Sprintf("%s (%s)", key.Name, org.MspID),
+ Value: fmt.Sprintf("%d:%d", key.ID, org.ID),
+ })
+ }
+ }
+ return opts, nil
+}
diff --git a/pkg/plugin/types/option.go b/pkg/plugin/types/option.go
new file mode 100644
index 0000000..ad288ae
--- /dev/null
+++ b/pkg/plugin/types/option.go
@@ -0,0 +1,6 @@
+package types
+
+// OptionItem is a generic label/value pair used to present selectable options
+// (keys, organizations, key/org combinations) to clients. Value is always a
+// string encoding, even for numeric IDs.
+type OptionItem struct {
+ Label string `json:"label"`
+ Value string `json:"value"`
+}
diff --git a/pkg/plugin/types/schema_extensions.go b/pkg/plugin/types/schema_extensions.go
new file mode 100644
index 0000000..ef4f590
--- /dev/null
+++ b/pkg/plugin/types/schema_extensions.go
@@ -0,0 +1,68 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// XSourceField represents a field with an x-source extension.
+type XSourceField struct {
+ Name string
+ XSource string
+ Title string
+ Type string
+ Required bool
+}
+
+// ExtractXSourceFields parses a JSON schema and returns all fields with x-source extensions.
+//
+// Only properties whose "x-source" entry is a string are returned; Required
+// is set from membership in the schema's top-level "required" list. A schema
+// without a "properties" object yields (nil, nil).
+//
+// NOTE(review): the result order follows Go map iteration and is therefore
+// non-deterministic across calls — callers must not rely on positional order.
+func ExtractXSourceFields(schemaData []byte) ([]XSourceField, error) {
+ var schema map[string]interface{}
+ if err := json.Unmarshal(schemaData, &schema); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal schema: %w", err)
+ }
+ props, ok := schema["properties"].(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+ // Get required fields
+ requiredFields := map[string]bool{}
+ if reqList, ok := schema["required"].([]interface{}); ok {
+ for _, r := range reqList {
+ if s, ok := r.(string); ok {
+ requiredFields[s] = true
+ }
+ }
+ }
+ var result []XSourceField
+ for name, prop := range props {
+ propMap, ok := prop.(map[string]interface{})
+ if !ok {
+ continue
+ }
+ // Properties without a string-valued "x-source" are skipped entirely.
+ xSource, ok := propMap["x-source"].(string)
+ if !ok {
+ continue
+ }
+ title, _ := propMap["title"].(string)
+ typ, _ := propMap["type"].(string)
+ result = append(result, XSourceField{
+ Name: name,
+ XSource: xSource,
+ Title: title,
+ Type: typ,
+ Required: requiredFields[name],
+ })
+ }
+ return result, nil
+}
+
+// ValidateXSourceValue checks if a value is valid for a given x-source field.
+//
+// The fetcher callback supplies the allowed values for the field's x-source
+// type; validation is a simple membership test over that list. An empty or
+// nil option list rejects every value.
+func ValidateXSourceValue(field XSourceField, value string, fetcher func(xSource string) []string) bool {
+ options := fetcher(field.XSource)
+ for _, opt := range options {
+ if opt == value {
+ return true
+ }
+ }
+ return false
+}
diff --git a/pkg/plugin/types/schema_extensions_test.go b/pkg/plugin/types/schema_extensions_test.go
new file mode 100644
index 0000000..639aca4
--- /dev/null
+++ b/pkg/plugin/types/schema_extensions_test.go
@@ -0,0 +1,57 @@
+package types
+
+import (
+ "testing"
+)
+
+// TestExtractXSourceFields verifies that only properties carrying an
+// "x-source" entry are extracted and that Required reflects the schema's
+// "required" list.
+func TestExtractXSourceFields(t *testing.T) {
+ schema := []byte(`{
+ "type": "object",
+ "properties": {
+ "KEY_ID": {
+ "type": "string",
+ "title": "Private Key",
+ "x-source": "keyStore"
+ },
+ "FABRIC_ORG": {
+ "type": "string",
+ "title": "Fabric Org",
+ "x-source": "fabricOrgs"
+ },
+ "PLAIN": {
+ "type": "string",
+ "title": "Plain"
+ }
+ },
+ "required": ["KEY_ID"]
+ }`)
+ fields, err := ExtractXSourceFields(schema)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if len(fields) != 2 {
+ t.Fatalf("expected 2 x-source fields, got %d", len(fields))
+ }
+ // ExtractXSourceFields ranges over a map, so the result order is not
+ // deterministic. The original assertions on fields[0]/fields[1] were
+ // flaky; index the results by name instead of position.
+ byName := make(map[string]XSourceField, len(fields))
+ for _, f := range fields {
+ byName[f.Name] = f
+ }
+ keyField, ok := byName["KEY_ID"]
+ if !ok || keyField.XSource != "keyStore" || !keyField.Required {
+ t.Errorf("unexpected field: %+v", keyField)
+ }
+ orgField, ok := byName["FABRIC_ORG"]
+ if !ok || orgField.XSource != "fabricOrgs" || orgField.Required {
+ t.Errorf("unexpected field: %+v", orgField)
+ }
+}
+
+// TestValidateXSourceValue checks the membership test against a stubbed
+// option source: only the "keyStore" x-source yields any options.
+func TestValidateXSourceValue(t *testing.T) {
+ field := XSourceField{Name: "KEY_ID", XSource: "keyStore"}
+ fetcher := func(xSource string) []string {
+ if xSource == "keyStore" {
+ return []string{"key1", "key2"}
+ }
+ return nil
+ }
+ if !ValidateXSourceValue(field, "key1", fetcher) {
+ t.Error("expected key1 to be valid")
+ }
+ if ValidateXSourceValue(field, "key3", fetcher) {
+ t.Error("expected key3 to be invalid")
+ }
+}
diff --git a/pkg/plugin/types/types.go b/pkg/plugin/types/types.go
index 853ce7c..665d6e0 100644
--- a/pkg/plugin/types/types.go
+++ b/pkg/plugin/types/types.go
@@ -17,14 +17,20 @@ type Plugin struct {
// Metadata contains plugin metadata
type Metadata struct {
- Name string `json:"name" yaml:"name"`
- Version string `json:"version" yaml:"version"`
+ Name string `json:"name" yaml:"name"`
+ Version string `json:"version" yaml:"version"`
+ Description string `json:"description" yaml:"description"`
+ Author string `json:"author" yaml:"author"`
+ Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
+ Repository string `json:"repository,omitempty" yaml:"repository,omitempty"`
+ License string `json:"license,omitempty" yaml:"license,omitempty"`
}
// Spec contains the plugin specification
type Spec struct {
DockerCompose DockerCompose `json:"dockerCompose" yaml:"dockerCompose"`
Parameters Parameters `json:"parameters" yaml:"parameters"`
+ Documentation Documentation `json:"documentation" yaml:"documentation"`
}
// DockerCompose contains the docker-compose configuration
@@ -32,20 +38,34 @@ type DockerCompose struct {
Contents string `json:"contents" yaml:"contents"`
}
+// XSourceType defines the possible values for x-source
+type XSourceType string
+
+const (
+ XSourceFabricPeer XSourceType = "fabric-peer"
+ XSourceKey XSourceType = "key"
+ XSourceFabricOrg XSourceType = "fabric-org"
+ XSourceFabricNetwork XSourceType = "fabric-network"
+ XSourceFabricKey XSourceType = "fabric-key"
+)
+
// Parameters defines the plugin parameters schema
type Parameters struct {
Schema string `json:"$schema" yaml:"$schema"`
Type string `json:"type" yaml:"type"`
Properties map[string]ParameterSpec `json:"properties" yaml:"properties"`
Required []string `json:"required" yaml:"required"`
+ // XSource defines the source type for plugin parameters
+ // Can be one of: fabric-peer, key, fabric-org, fabric-network
}
// ParameterSpec defines a single parameter specification
type ParameterSpec struct {
- Type string `json:"type" yaml:"type"`
- Description string `json:"description" yaml:"description"`
- Default string `json:"default,omitempty" yaml:"default,omitempty"`
- Enum []string `json:"enum,omitempty" yaml:"enum,omitempty"`
+ Type string `json:"type" yaml:"type"`
+ Description string `json:"description" yaml:"description"`
+ Default string `json:"default,omitempty" yaml:"default,omitempty"`
+ Enum []string `json:"enum,omitempty" yaml:"enum,omitempty"`
+ XSource XSourceType `json:"x-source,omitempty" yaml:"x-source,omitempty"`
}
// DeploymentStatus represents the status of a plugin deployment
@@ -128,3 +148,63 @@ func (p *Plugin) Validate() error {
return nil
}
+
+// FabricPeerDetails represents the details of a Fabric peer
+// resolved from a fabric-peer x-source parameter.
+type FabricPeerDetails struct {
+ ID string `json:"id"` // node ID, kept as the decimal string the caller supplied
+ Name string `json:"name"`
+ ExternalEndpoint string `json:"externalEndpoint"`
+ TLSCert string `json:"tlsCert"`
+ MspID string `json:"mspId"`
+ OrgID int64 `json:"orgId"`
+}
+
+// FabricOrgDetails represents the details of a Fabric organization
+type FabricOrgDetails struct {
+ ID int64 `json:"id"`
+ MspID string `json:"mspId"`
+ Description string `json:"description"` // empty when the DB column is NULL
+}
+
+// KeyDetails represents the details of a key
+type KeyDetails struct {
+ ID int64 `json:"id"`
+ Name string `json:"name"`
+ Type string `json:"type"` // key algorithm
+ Description string `json:"description"` // empty when the DB column is NULL
+}
+
+// FabricKeyDetails represents the details of a Fabric key
+// (a key paired with the organization that owns it).
+type FabricKeyDetails struct {
+ KeyID int64 `json:"keyId"`
+ OrgID int64 `json:"orgId"`
+ Name string `json:"name"`
+ Type string `json:"type"` // key algorithm
+ Description string `json:"description"`
+ MspID string `json:"mspId"`
+ Certificate string `json:"certificate"` // PEM certificate; empty when unset
+}
+
+// Documentation contains plugin documentation information
+type Documentation struct {
+ // README contains the main documentation for the plugin
+ README string `json:"readme" yaml:"readme"`
+ // Examples contains example configurations and usage
+ Examples []Example `json:"examples,omitempty" yaml:"examples,omitempty"`
+ // Troubleshooting contains common issues and their solutions
+ Troubleshooting []TroubleshootingItem `json:"troubleshooting,omitempty" yaml:"troubleshooting,omitempty"`
+}
+
+// Example represents a usage example for the plugin
+type Example struct {
+ Name string `json:"name" yaml:"name"`
+ Description string `json:"description" yaml:"description"`
+ Parameters map[string]interface{} `json:"parameters" yaml:"parameters"`
+}
+
+// TroubleshootingItem represents a common issue and its solution
+type TroubleshootingItem struct {
+ Problem string `json:"problem" yaml:"problem"`
+ Solution string `json:"solution" yaml:"solution"`
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+}
diff --git a/pkg/plugin/xsource/fabric_key.go b/pkg/plugin/xsource/fabric_key.go
new file mode 100644
index 0000000..c32d8fd
--- /dev/null
+++ b/pkg/plugin/xsource/fabric_key.go
@@ -0,0 +1,182 @@
+package xsource
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ key "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service"
+ nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service"
+)
+
+// FabricKeyValue resolves a fabric-key x-source parameter — a (key ID,
+// org ID) pair — into certificate and private-key material at deploy time.
+type FabricKeyValue struct {
+ BaseXSourceValue
+ KeyID int64
+ OrgID int64
+ queries *db.Queries
+ nodeService *nodeservice.NodeService
+ keyManagement *key.KeyManagementService
+}
+
+// NewFabricKeyHandler creates a new handler for fabric-key x-source type
+func NewFabricKeyHandler(queries *db.Queries, nodeService *nodeservice.NodeService, keyManagement *key.KeyManagementService) XSourceHandler {
+ return &fabricKeyHandler{
+ queries: queries,
+ nodeService: nodeService,
+ keyManagement: keyManagement,
+ }
+}
+
+// fabricKeyHandler wires the shared services into FabricKeyValue instances.
+type fabricKeyHandler struct {
+ queries *db.Queries
+ nodeService *nodeservice.NodeService
+ keyManagement *key.KeyManagementService
+}
+
+// GetType reports the x-source type this handler serves.
+func (h *fabricKeyHandler) GetType() XSourceType {
+ return FabricKey
+}
+
+// CreateValue wraps a raw parameter value as a FabricKeyValue.
+//
+// NOTE(review): the type assertions accept float64 only — the numeric shape
+// produced by encoding/json. Values decoded as int (e.g. by a YAML decoder)
+// would be rejected; confirm whether that is intended.
+func (h *fabricKeyHandler) CreateValue(key string, rawValue interface{}) (XSourceValue, error) {
+ keyMap, ok := rawValue.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("fabric-key value must be an object with keyId and orgId")
+ }
+ keyID, keyOk := keyMap["keyId"].(float64)
+ orgID, orgOk := keyMap["orgId"].(float64)
+ if !keyOk || !orgOk {
+ return nil, fmt.Errorf("invalid fabric key format: both keyId and orgId are required")
+ }
+
+ return &FabricKeyValue{
+ BaseXSourceValue: BaseXSourceValue{RawValue: rawValue, Key: key},
+ KeyID: int64(keyID),
+ OrgID: int64(orgID),
+ queries: h.queries,
+ nodeService: h.nodeService,
+ keyManagement: h.keyManagement,
+ }, nil
+}
+
+// ListOptions delegates to FabricKeyValue.ListOptions (currently a stub).
+// keyManagement is left unset here; the stub below does not use it.
+func (h *fabricKeyHandler) ListOptions(ctx context.Context) ([]OptionItem, error) {
+ value := &FabricKeyValue{
+ queries: h.queries,
+ nodeService: h.nodeService,
+ }
+ return value.ListOptions(ctx)
+}
+
+// ListOptions is a stub: it always returns an empty option list.
+func (v *FabricKeyValue) ListOptions(ctx context.Context) ([]OptionItem, error) {
+ // TODO: Implement listing available keys
+ return []OptionItem{}, nil
+}
+
+// Validate is a stub.
+// NOTE(review): currently a no-op — every key/org pair passes validation;
+// invalid IDs only surface later, when GetValue hits the DB.
+func (v *FabricKeyValue) Validate(ctx context.Context) error {
+ // TODO: Implement validation
+ return nil
+}
+
+// FabricKeyDetails represents the details of a Fabric key
+// as exposed to docker-compose templates by GetValue.
+type FabricKeyDetails struct {
+ KeyID int64
+ OrgID int64
+ Name string
+ Type string
+ Description string
+ MspID string
+ Certificate string
+ PrivateKey string
+ CertPath string // Path inside the container
+ KeyPath string // Path inside the container
+}
+
+// GetValue resolves the key/org pair into a FabricKeyDetails value for use
+// in docker-compose templates.
+//
+// It loads the key from the key-management service, the organization (for
+// its MSP ID) from the database, and the decrypted private key. Returns an
+// error when the key has no certificate instead of panicking — the original
+// code dereferenced key.Certificate and key.Description unconditionally,
+// which is a nil-pointer panic when either field is unset.
+func (v *FabricKeyValue) GetValue(ctx context.Context) (interface{}, error) {
+ // Get key details from key management service
+ key, err := v.keyManagement.GetKey(ctx, int(v.KeyID))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get key: %w", err)
+ }
+
+ // A Fabric key must carry a certificate; the description is optional and
+ // defaults to empty.
+ if key.Certificate == nil {
+ return nil, fmt.Errorf("key %d has no certificate", v.KeyID)
+ }
+ description := ""
+ if key.Description != nil {
+ description = *key.Description
+ }
+
+ // Get organization details to get MSP ID
+ org, err := v.queries.GetFabricOrganization(ctx, v.OrgID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get organization: %w", err)
+ }
+
+ // Get private key
+ privateKey, err := v.keyManagement.GetDecryptedPrivateKey(int(v.KeyID))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get private key: %w", err)
+ }
+
+ // Create key details
+ details := &FabricKeyDetails{
+ KeyID: v.KeyID,
+ OrgID: v.OrgID,
+ Name: key.Name,
+ Type: string(key.Algorithm),
+ Description: description,
+ MspID: org.MspID,
+ Certificate: *key.Certificate,
+ PrivateKey: privateKey,
+ CertPath: "/etc/chainlaunch/key/cert.pem",
+ KeyPath: "/etc/chainlaunch/key/key.pem",
+ }
+
+ return details, nil
+}
+
+// GetVolumeMounts materializes the key's certificate and decrypted private
+// key as files on the host and returns read-only bind mounts exposing them
+// to the plugin container under /etc/chainlaunch/<param>/.
+//
+// Fixes over the original: the certificate pointer is checked before being
+// dereferenced (previously a nil certificate panicked), and the staging
+// directory is created 0700 instead of 0755 so the decrypted private key is
+// not placed under a world-readable directory.
+func (v *FabricKeyValue) GetVolumeMounts(ctx context.Context) ([]VolumeMount, error) {
+ var mounts []VolumeMount
+
+ // Get key details
+ key, err := v.keyManagement.GetKey(ctx, int(v.KeyID))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get key: %w", err)
+ }
+ if key.Certificate == nil {
+ return nil, fmt.Errorf("key %d has no certificate", v.KeyID)
+ }
+
+ // Get private key
+ privateKey, err := v.keyManagement.GetDecryptedPrivateKey(int(v.KeyID))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get private key: %w", err)
+ }
+
+ // Create a staging directory for the key material. 0700: it will hold a
+ // decrypted private key. NOTE(review): the path is fixed per key ID, so
+ // concurrent deployments sharing a key share (and may clobber) these files.
+ tempDir := fmt.Sprintf("/tmp/chainlaunch/key/%d", v.KeyID)
+ if err := os.MkdirAll(tempDir, 0700); err != nil {
+ return nil, fmt.Errorf("failed to create temp directory: %w", err)
+ }
+
+ // Create certificate file
+ certPath := filepath.Join(tempDir, "cert.pem")
+ if err := os.WriteFile(certPath, []byte(*key.Certificate), 0644); err != nil {
+ return nil, fmt.Errorf("failed to write certificate: %w", err)
+ }
+
+ // Create private key file (0600: owner read/write only)
+ keyPath := filepath.Join(tempDir, "key.pem")
+ if err := os.WriteFile(keyPath, []byte(privateKey), 0600); err != nil {
+ return nil, fmt.Errorf("failed to write private key: %w", err)
+ }
+
+ // Add volume mounts
+ mounts = append(mounts, VolumeMount{
+ Source: certPath,
+ Target: fmt.Sprintf("/etc/chainlaunch/%s/cert.pem", v.Key),
+ Type: "bind",
+ ReadOnly: true,
+ Description: fmt.Sprintf("Fabric key certificate for %s", key.Name),
+ })
+
+ mounts = append(mounts, VolumeMount{
+ Source: keyPath,
+ Target: fmt.Sprintf("/etc/chainlaunch/%s/key.pem", v.Key),
+ Type: "bind",
+ ReadOnly: true,
+ Description: fmt.Sprintf("Fabric private key for %s", key.Name),
+ })
+
+ return mounts, nil
+}
diff --git a/pkg/plugin/xsource/fabric_peer.go b/pkg/plugin/xsource/fabric_peer.go
new file mode 100644
index 0000000..19acb77
--- /dev/null
+++ b/pkg/plugin/xsource/fabric_peer.go
@@ -0,0 +1,218 @@
+package xsource
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service"
+ "github.com/chainlaunch/chainlaunch/pkg/nodes/types"
+)
+
// FabricPeerValue represents a fabric-peer x-source value
type FabricPeerValue struct {
	BaseXSourceValue
	// PeerIDs holds the selected Fabric peer node IDs as decimal strings
	// (numeric raw values are normalized to strings in CreateValue).
	PeerIDs     []string
	queries     *db.Queries
	nodeService *nodeservice.NodeService
}

// NewFabricPeerHandler creates a new handler for fabric-peer x-source type
func NewFabricPeerHandler(queries *db.Queries, nodeService *nodeservice.NodeService) XSourceHandler {
	return &fabricPeerHandler{
		queries:     queries,
		nodeService: nodeService,
	}
}

// fabricPeerHandler carries the service dependencies needed to build and
// list FabricPeerValue instances.
type fabricPeerHandler struct {
	queries     *db.Queries
	nodeService *nodeservice.NodeService
}

// GetType reports the x-source type this handler manages (fabric-peer).
func (h *fabricPeerHandler) GetType() XSourceType {
	return FabricPeer
}
+
+func (h *fabricPeerHandler) CreateValue(key string, rawValue interface{}) (XSourceValue, error) {
+ var peerIDs []string
+
+ switch v := rawValue.(type) {
+ case string:
+ peerIDs = []string{v}
+ case float64:
+ peerIDs = []string{strconv.FormatInt(int64(v), 10)}
+ case int:
+ peerIDs = []string{strconv.FormatInt(int64(v), 10)}
+ case []interface{}:
+ peerIDs = make([]string, len(v))
+ for i, item := range v {
+ switch val := item.(type) {
+ case string:
+ peerIDs[i] = val
+ case float64:
+ peerIDs[i] = strconv.FormatInt(int64(val), 10)
+ case int:
+ peerIDs[i] = strconv.FormatInt(int64(val), 10)
+ default:
+ return nil, fmt.Errorf("fabric-peer array elements must be strings or numbers")
+ }
+ }
+ default:
+ return nil, fmt.Errorf("fabric-peer value must be a string, number, or array of strings/numbers")
+ }
+
+ return &FabricPeerValue{
+ BaseXSourceValue: BaseXSourceValue{RawValue: rawValue, Key: key},
+ PeerIDs: peerIDs,
+ queries: h.queries,
+ nodeService: h.nodeService,
+ }, nil
+}
+
// ListOptions delegates to FabricPeerValue.ListOptions using a value that
// carries only the service dependencies (no peer selection yet).
func (h *fabricPeerHandler) ListOptions(ctx context.Context) ([]OptionItem, error) {
	value := &FabricPeerValue{
		queries:     h.queries,
		nodeService: h.nodeService,
	}
	return value.ListOptions(ctx)
}
+
+func (v *FabricPeerValue) ListOptions(ctx context.Context) ([]OptionItem, error) {
+ // Get all Fabric peers from the node service
+ platform := types.PlatformFabric
+ peers, err := v.nodeService.ListNodes(ctx, &platform, 1, 100)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list peers: %w", err)
+ }
+
+ var opts []OptionItem
+ for _, peer := range peers.Items {
+ if peer.NodeType == types.NodeTypeFabricPeer {
+ opts = append(opts, OptionItem{
+ Label: fmt.Sprintf("%s (%s)", peer.Name, peer.FabricPeer.MSPID),
+ Value: fmt.Sprintf("%d", peer.ID),
+ })
+ }
+ }
+ return opts, nil
+}
+
+func (v *FabricPeerValue) Validate(ctx context.Context) error {
+ options, err := v.ListOptions(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to list fabric peer options: %w", err)
+ }
+
+ // Create a map of valid peer IDs for faster lookup
+ validPeers := make(map[string]bool)
+ for _, opt := range options {
+ validPeers[opt.Value] = true
+ }
+
+ // Validate each peer ID
+ for _, peerID := range v.PeerIDs {
+ if !validPeers[peerID] {
+ return fmt.Errorf("invalid fabric peer ID: %s", peerID)
+ }
+ }
+
+ return nil
+}
+
+func (v *FabricPeerValue) GetValue(ctx context.Context) (interface{}, error) {
+ var details []*FabricPeerDetails
+
+ for _, peerIDStr := range v.PeerIDs {
+ peerID, err := strconv.ParseInt(peerIDStr, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid peer ID format: %w", err)
+ }
+
+ peer, err := v.nodeService.GetNode(ctx, peerID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get peer: %w", err)
+ }
+
+ if peer.FabricPeer == nil {
+ return nil, fmt.Errorf("peer is not a Fabric peer")
+ }
+
+ // Define the TLS cert path inside the container
+ tlsCertPath := fmt.Sprintf("/etc/chainlaunch/peers/%d/tls/cert.pem", peerID)
+
+ details = append(details, &FabricPeerDetails{
+ ID: peerID,
+ Name: peer.Name,
+ ExternalEndpoint: peer.FabricPeer.ExternalEndpoint,
+ TLSCert: peer.FabricPeer.TLSCert,
+ MspID: peer.FabricPeer.MSPID,
+ OrgID: peer.FabricPeer.OrganizationID,
+ TLSCertPath: tlsCertPath,
+ })
+ }
+
+ // If there's only one peer, return it directly
+ if len(details) == 1 {
+ return details[0], nil
+ }
+
+ return details, nil
+}
+
// GetVolumeMounts stages each selected peer's TLS certificate under
// /tmp/chainlaunch/peers/<id>/tls on the host and returns read-only bind
// mounts targeting /etc/chainlaunch/peers/<id>/tls/cert.pem — the same path
// GetValue reports as TLSCertPath.
//
// NOTE(review): the staging path is predictable; consider os.MkdirTemp if
// untrusted local users are in the threat model.
func (v *FabricPeerValue) GetVolumeMounts(ctx context.Context) ([]VolumeMount, error) {
	var mounts []VolumeMount

	for _, peerIDStr := range v.PeerIDs {
		peerID, err := strconv.ParseInt(peerIDStr, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid peer ID format: %w", err)
		}

		peer, err := v.nodeService.GetNode(ctx, peerID)
		if err != nil {
			return nil, fmt.Errorf("failed to get peer: %w", err)
		}

		// Only Fabric peers carry the TLSCert field needed below.
		if peer.FabricPeer == nil {
			return nil, fmt.Errorf("peer is not a Fabric peer")
		}

		// Create a per-peer staging directory for the TLS cert.
		tempDir := fmt.Sprintf("/tmp/chainlaunch/peers/%d/tls", peerID)
		if err := os.MkdirAll(tempDir, 0755); err != nil {
			return nil, fmt.Errorf("failed to create temp directory: %w", err)
		}

		// Certificate is public material; 0644 is acceptable.
		certPath := filepath.Join(tempDir, "cert.pem")
		if err := os.WriteFile(certPath, []byte(peer.FabricPeer.TLSCert), 0644); err != nil {
			return nil, fmt.Errorf("failed to write TLS cert: %w", err)
		}

		// Expose the cert read-only inside the container.
		mounts = append(mounts, VolumeMount{
			Source:      certPath,
			Target:      fmt.Sprintf("/etc/chainlaunch/peers/%d/tls/cert.pem", peerID),
			Type:        "bind",
			ReadOnly:    true,
			Description: fmt.Sprintf("TLS certificate for peer %s", peer.Name),
		})
	}

	return mounts, nil
}
+
// FabricPeerDetails represents the details of a Fabric peer as exposed to
// templates by FabricPeerValue.GetValue.
type FabricPeerDetails struct {
	// ID is the node's numeric identifier.
	ID int64
	// Name is the node's display name.
	Name string
	// ExternalEndpoint is the peer's externally reachable address.
	ExternalEndpoint string
	// TLSCert is the peer's TLS certificate material (PEM text).
	TLSCert string
	// MspID is the MSP the peer belongs to.
	MspID string
	// OrgID is the owning organization's identifier.
	OrgID int64
	TLSCertPath string // Path inside the container
}
diff --git a/pkg/plugin/xsource/registry.go b/pkg/plugin/xsource/registry.go
new file mode 100644
index 0000000..a2b8460
--- /dev/null
+++ b/pkg/plugin/xsource/registry.go
@@ -0,0 +1,71 @@
+package xsource
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chainlaunch/chainlaunch/pkg/db"
+ key "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service"
+ nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service"
+)
+
// Registry manages x-source handlers
type Registry struct {
	// handlers maps each XSourceType to the handler that processes it.
	handlers map[XSourceType]XSourceHandler
}

// NewRegistry creates a new registry with the default handlers
// (fabric-key and fabric-peer) pre-registered.
func NewRegistry(queries *db.Queries, nodeService *nodeservice.NodeService, keyManagement *key.KeyManagementService) *Registry {
	r := &Registry{
		handlers: make(map[XSourceType]XSourceHandler),
	}

	// Register default handlers
	r.Register(NewFabricKeyHandler(queries, nodeService, keyManagement))
	r.Register(NewFabricPeerHandler(queries, nodeService))

	return r
}

// Register adds a new handler to the registry, replacing any existing
// handler for the same type.
// NOTE(review): the map is mutated without locking — presumably registration
// happens once at startup; confirm before registering concurrently.
func (r *Registry) Register(handler XSourceHandler) {
	r.handlers[handler.GetType()] = handler
}
+
+// GetHandler returns the handler for the specified x-source type
+func (r *Registry) GetHandler(xSourceType XSourceType) (XSourceHandler, error) {
+ handler, ok := r.handlers[xSourceType]
+ if !ok {
+ return nil, fmt.Errorf("no handler registered for x-source type: %s", xSourceType)
+ }
+ return handler, nil
+}
+
+// ValidateAndProcess validates and processes an x-source value
+func (r *Registry) ValidateAndProcess(ctx context.Context, xSourceType XSourceType, key string, value interface{}) (interface{}, error) {
+ handler, err := r.GetHandler(xSourceType)
+ if err != nil {
+ return nil, err
+ }
+
+ xSourceValue, err := handler.CreateValue(key, value)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := xSourceValue.Validate(ctx); err != nil {
+ return nil, err
+ }
+
+ return xSourceValue.GetValue(ctx)
+}
+
// ListOptions returns the valid options for the specified x-source type,
// delegating to the registered handler; errors if the type is unknown.
func (r *Registry) ListOptions(ctx context.Context, xSourceType XSourceType) ([]OptionItem, error) {
	handler, err := r.GetHandler(xSourceType)
	if err != nil {
		return nil, err
	}

	return handler.ListOptions(ctx)
}
diff --git a/pkg/plugin/xsource/types.go b/pkg/plugin/xsource/types.go
new file mode 100644
index 0000000..d919876
--- /dev/null
+++ b/pkg/plugin/xsource/types.go
@@ -0,0 +1,69 @@
+package xsource
+
+import (
+ "context"
+ "fmt"
+)
+
// XSourceType represents the type of x-source
type XSourceType string

// Known x-source types. The string values are external identifiers used in
// plugin definitions, so they must not be changed.
const (
	FabricKey  XSourceType = "fabric-key"
	FabricPeer XSourceType = "fabric-peer"
	FabricOrg  XSourceType = "fabric-org"
	// NOTE(review): camelCase here is inconsistent with the kebab-case
	// identifiers above; left as-is because the value is external contract.
	KeyStore XSourceType = "keyStore"
)
+
// VolumeMount represents a volume mount configuration
type VolumeMount struct {
	// Source is the host path (or volume name) to mount.
	Source string
	// Target is the path inside the container.
	Target string
	Type string // "bind" or "volume"
	// ReadOnly mounts the target read-only when true.
	ReadOnly bool
	// Description is a human-readable note about what the mount provides.
	Description string
}

// XSourceValue represents a value that can be validated and processed
type XSourceValue interface {
	// Validate checks if the value is valid for this x-source type
	Validate(ctx context.Context) error
	// GetValue returns the processed value that can be used in templates
	GetValue(ctx context.Context) (interface{}, error)
	// GetValidationValue returns the value used for validation
	GetValidationValue() string
	// GetVolumeMounts returns the volume mounts needed for this x-source
	GetVolumeMounts(ctx context.Context) ([]VolumeMount, error)
}

// XSourceHandler defines the interface for handling x-source types
type XSourceHandler interface {
	// GetType returns the type of x-source this handler manages
	GetType() XSourceType
	// CreateValue creates a new XSourceValue from the raw input
	CreateValue(key string, rawValue interface{}) (XSourceValue, error)
	// ListOptions returns the list of valid options for this x-source type
	ListOptions(ctx context.Context) ([]OptionItem, error)
}

// OptionItem represents a selectable option for an x-source
type OptionItem struct {
	// Label is the human-readable display text.
	Label string
	// Value is the machine identifier submitted back (e.g. a node ID).
	Value string
}
+
+// BaseXSourceValue provides common functionality for x-source values
+type BaseXSourceValue struct {
+ Key string
+ RawValue interface{}
+}
+
+// GetValidationValue returns the string representation of the value for validation
+func (b *BaseXSourceValue) GetValidationValue() string {
+ return fmt.Sprintf("%v", b.RawValue)
+}
+
+// GetVolumeMounts returns an empty slice of volume mounts
+func (b *BaseXSourceValue) GetVolumeMounts(ctx context.Context) ([]VolumeMount, error) {
+ return []VolumeMount{}, nil
+}
diff --git a/web/.gitignore b/web/.gitignore
index 38d7344..4da5a8b 100644
--- a/web/.gitignore
+++ b/web/.gitignore
@@ -11,3 +11,8 @@ dist/
.vscode/*
!.vscode/extensions.json
.idea
+
+# Playwright
+playwright-report/
+playwright/.auth/
+test-results
\ No newline at end of file
diff --git a/web/README.md b/web/README.md
index 37b1dd3..2c93a29 100644
--- a/web/README.md
+++ b/web/README.md
@@ -27,3 +27,17 @@ Preview the production build locally:
```bash
pnpm preview
```
+
+# Playwright E2E Tests
+
+Run Playwright tests:
+
+```sh
+bun run test:e2e
+```
+
+Open Playwright UI:
+
+```sh
+bun run test:e2e:dev
+```
diff --git a/web/bun.lockb b/web/bun.lockb
index 98c7965..339322c 100755
Binary files a/web/bun.lockb and b/web/bun.lockb differ
diff --git a/web/package.json b/web/package.json
index 3f812f3..50452d1 100644
--- a/web/package.json
+++ b/web/package.json
@@ -6,13 +6,17 @@
"node1:dev": "export PUBLIC_API_URL=http://localhost:8100 && rsbuild dev --port=3100 --config=rsbuild.dev.config.ts",
"node2:dev": "export PUBLIC_API_URL=http://localhost:8102 && rsbuild dev --port=3102 --config=rsbuild.dev.config.ts",
"node3:dev": "export PUBLIC_API_URL=http://localhost:8103 && rsbuild dev --port=3103 --config=rsbuild.dev.config.ts",
- "dev": "export PUBLIC_API_URL=http://localhost:8100 && rsbuild dev --port=3100",
"build": "rsbuild build",
- "preview": "rsbuild preview"
+ "preview": "rsbuild preview",
+ "test:e2e": "playwright test",
+ "test:e2e:dev": "playwright test --ui"
},
"dependencies": {
+ "@emotion/react": "^11.11.0",
+ "@emotion/styled": "^11.11.0",
"@hey-api/client-fetch": "^0.5.7",
"@hookform/resolvers": "^3.9.1",
+ "@mui/material": "^5.13.0",
"@peculiar/x509": "^1.12.3",
"@radix-ui/react-alert-dialog": "^1.1.4",
"@radix-ui/react-avatar": "^1.1.2",
@@ -29,8 +33,9 @@
"@radix-ui/react-separator": "^1.1.1",
"@radix-ui/react-slot": "^1.1.1",
"@radix-ui/react-switch": "^1.1.2",
- "@radix-ui/react-tabs": "^1.1.2",
+ "@radix-ui/react-tabs": "^1.1.12",
"@radix-ui/react-tooltip": "^1.1.6",
+ "@tailwindcss/typography": "^0.5.16",
"@tanstack/react-query": "^5.62.11",
"@tanstack/react-table": "^8.21.2",
"ansi-to-html": "^0.7.2",
@@ -41,20 +46,21 @@
"cmdk": "1.0.0",
"date-fns": "^4.1.0",
"dompurify": "^3.2.5",
- "lucide-react": "^0.469.0",
+ "lucide-react": "^0.509.0",
"next-themes": "^0.4.4",
"postcss": "^8.4.49",
- "react": "^19.0.0",
- "react-dom": "^19.0.0",
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
"react-hook-form": "^7.54.2",
- "react-markdown": "^9.0.3",
- "react-router-dom": "^7.1.1",
+ "react-markdown": "^10.1.0",
+ "react-router-dom": "^6.11.1",
"react-syntax-highlighter": "^15.6.1",
- "recharts": "^2.15.0",
+ "recharts": "^2.15.3",
"redoc": "^2.5.0",
"rehype-highlight": "^7.0.2",
"rehype-raw": "^7.0.0",
"sonner": "^1.7.1",
+ "swagger-ui-react": "^5.22.0",
"tailwind-merge": "^2.6.0",
"tailwindcss": "^3.4.17",
"tailwindcss-animate": "^1.0.7",
@@ -62,12 +68,20 @@
},
"devDependencies": {
"@hey-api/openapi-ts": "^0.62.0",
+ "@playwright/test": "^1.52.0",
"@rsbuild/core": "^1.3.15",
"@rsbuild/plugin-react": "^1.3.0",
"@rsbuild/plugin-svgr": "^1.2.0",
- "@types/react": "^19.0.0",
- "@types/react-dom": "^19.0.0",
+ "@types/react": "^18.2.6",
+ "@types/react-dom": "^18.2.4",
"@types/react-syntax-highlighter": "^15.5.13",
- "typescript": "^5.7.2"
+ "@typescript-eslint/eslint-plugin": "^5.59.5",
+ "@typescript-eslint/parser": "^5.59.5",
+ "@vitejs/plugin-react": "^4.0.0",
+ "eslint": "^8.40.0",
+ "eslint-plugin-react-hooks": "^4.6.0",
+ "eslint-plugin-react-refresh": "^0.4.1",
+ "typescript": "^5.0.4",
+ "vite": "^4.3.5"
}
}
diff --git a/web/playwright.config.ts b/web/playwright.config.ts
new file mode 100644
index 0000000..a9bbaf5
--- /dev/null
+++ b/web/playwright.config.ts
@@ -0,0 +1,25 @@
import { defineConfig, devices } from '@playwright/test';

// Playwright E2E configuration. Tests live under ./playwright/tests and run
// against a served UI (port 3100 by default; override with
// PLAYWRIGHT_BASE_URL, e.g. in CI).
export default defineConfig({
  testDir: './playwright/tests',
  // Per-test timeout (30s).
  timeout: 30 * 1000,
  expect: {
    // Default timeout for expect() polling assertions.
    timeout: 5000,
  },
  fullyParallel: true,
  // Fail CI if a test.only slipped into the suite.
  forbidOnly: !!process.env.CI,
  retries: process.env.CI ? 2 : 0,
  // Serialize on CI; use Playwright's default worker count locally.
  workers: process.env.CI ? 1 : undefined,
  reporter: 'html',
  use: {
    trace: 'on-first-retry',
    baseURL: process.env.PLAYWRIGHT_BASE_URL || 'http://localhost:3100',
    screenshot: 'only-on-failure', // Take screenshots only on test failures
  },
  projects: [
    {
      name: 'chromium',
      use: { ...devices['Desktop Chrome'] },
    },
  ],
});
\ No newline at end of file
diff --git a/web/playwright/tests/basic.spec.ts b/web/playwright/tests/basic.spec.ts
new file mode 100644
index 0000000..fe06871
--- /dev/null
+++ b/web/playwright/tests/basic.spec.ts
@@ -0,0 +1,6 @@
import { test, expect } from '@playwright/test'

// Smoke test: the app serves the homepage and sets a non-empty document title.
test('homepage loads and has a title', async ({ page }) => {
	await page.goto('/')
	// /./ matches any non-empty title string.
	await expect(page).toHaveTitle(/./)
})
diff --git a/web/playwright/tests/create-node-fabric-peer-logic.ts b/web/playwright/tests/create-node-fabric-peer-logic.ts
new file mode 100644
index 0000000..c276e47
--- /dev/null
+++ b/web/playwright/tests/create-node-fabric-peer-logic.ts
@@ -0,0 +1,60 @@
+import { Page } from '@playwright/test'
+
+const FABRIC_NODE_CREATE_PATH = '/nodes/fabric/create'
+
+// Helper to generate unique values with cryptographically secure random numbers
+function uniqueSuffix() {
+ const bytes = new Uint8Array(4)
+ crypto.getRandomValues(bytes)
+ const randomNum = new DataView(bytes.buffer).getUint32(0) % 10000
+ return `${Date.now()}-${randomNum}`
+}
+
+/**
+ * Creates a Fabric node via the UI.
+ *
+ * @param page Playwright page instance
+ * @param baseURL Base URL of the app
+ * @param mspId The MSP ID of the organization to select
+ * @returns The node name used for creation
+ */
+export async function createFabricNode(page: Page, baseURL: string, mspId: string): Promise {
+ await page.goto(baseURL + FABRIC_NODE_CREATE_PATH)
+ await page.getByRole('heading', { name: /create fabric node/i }).waitFor({ state: 'visible', timeout: 10000 })
+
+ // Fill out the form with unique values
+ const nodeName = `test-node-${uniqueSuffix()}`
+
+ // Name
+ await page.getByPlaceholder('Enter node name').fill(nodeName)
+
+ // Select the organization just created (by MSP ID)
+ const orgSelect = page.getByRole('combobox', { name: /organization/i })
+ await orgSelect.click()
+ await page.getByRole('option', { name: mspId }).click()
+
+ // Select deployment mode "Docker"
+ const modeSelect = page.getByRole('combobox', { name: /mode/i })
+ await modeSelect.click()
+ await page.getByRole('option', { name: /docker/i }).click()
+
+ // Listen Address - use crypto.getRandomValues for secure random port
+ const listenPort = 7000 + (new DataView(crypto.getRandomValues(new Uint8Array(4)).buffer).getUint32(0) % 1000)
+ await page.getByPlaceholder('e.g., 0.0.0.0:7051').fill(`0.0.0.0:${listenPort}`)
+
+ // Operations Address - use crypto.getRandomValues for secure random port
+ const opsPort = 9000 + (new DataView(crypto.getRandomValues(new Uint8Array(4)).buffer).getUint32(0) % 1000)
+ await page.getByPlaceholder('e.g., 0.0.0.0:9443').fill(`0.0.0.0:${opsPort}`)
+
+ // External Endpoint - use crypto.getRandomValues for secure random port
+ const extPort = 7000 + (new DataView(crypto.getRandomValues(new Uint8Array(4)).buffer).getUint32(0) % 1000)
+ await page.getByPlaceholder('e.g., peer0.org1.example.com:7051').fill(`peer0.example.com:${extPort}`)
+
+ // Submit
+ await page.getByRole('button', { name: /create node/i }).click()
+ await page.waitForLoadState('networkidle')
+ // Wait for navigation to the node detail page or nodes list
+ await page.getByText(/General Information/i).waitFor({ state: 'visible', timeout: 60000 })
+
+ return nodeName
+}
diff --git a/web/playwright/tests/create-node-fabric-peer-wizard.spec.ts b/web/playwright/tests/create-node-fabric-peer-wizard.spec.ts
new file mode 100644
index 0000000..f6ff0f1
--- /dev/null
+++ b/web/playwright/tests/create-node-fabric-peer-wizard.spec.ts
@@ -0,0 +1,68 @@
+import { test, expect } from '@playwright/test'
+import { login } from './login'
+import { createOrganization } from './create-organization'
+// Helper to generate unique values
+function uniqueSuffix() {
+ const bytes = new Uint8Array(4)
+ crypto.getRandomValues(bytes)
+ const randomNum = new DataView(bytes.buffer).getUint32(0) % 10000
+ return `${Date.now()}-${randomNum}`
+}
+
const NODE_CREATE_PATH = '/nodes/create'

// This test assumes the admin user is set in env vars for login
// and that at least one organization exists to select

// Walks the multi-step NodeCreationWizard end-to-end: protocol -> node type
// -> configuration -> review -> submit.
test('can create a Fabric peer node using the NodeCreationWizard', async ({ page, baseURL }) => {
	// Step 1: Login as admin
	await login(page, baseURL ?? '')
	await createOrganization(page, baseURL ?? '', { mspId: `test-msp-${uniqueSuffix()}`, description: 'Test organization' })
	// Step 2: Go to node creation wizard
	await page.goto((baseURL ?? '') + NODE_CREATE_PATH)
	await expect(page.getByRole('heading', { name: /create node/i })).toBeVisible()

	// Step 3: Wizard - Select Protocol (Fabric)
	// NOTE(review): the name field is filled here AND again in step 5 — the
	// placeholder may not exist on the protocol step; confirm against the UI.
	const nodeName = `test-peer-${uniqueSuffix()}`
	await page.getByPlaceholder('Enter node name').fill(nodeName)
	await page.getByRole('button', { name: 'Fabric' }).click()
	await page.getByRole('button', { name: /next/i }).click()

	// Step 4: Wizard - Select Node Type (Peer)
	await page.getByRole('button', { name: 'Peer node' }).click()
	await page.getByRole('button', { name: /next/i }).click()

	// Step 5: Wizard - Configuration
	await page.getByPlaceholder('Enter node name').fill(nodeName)

	// Select the first available organization (assume dropdown is present)
	// NOTE(review): this picks the first option, not necessarily the
	// organization created above — acceptable while runs are serialized.
	const orgSelect = page.getByRole('combobox', { name: /organization/i })
	await orgSelect.click()
	// Select the first option (could be improved to select by name if needed)
	await page.getByRole('option').first().click()

	// Select deployment mode "Docker"
	const modeSelect = page.getByRole('combobox', { name: /mode/i })
	await modeSelect.click()
	await page.getByRole('option', { name: /docker/i }).click()
	// Listen Address (random port in 7000-7999)
	const listenPort = 7000 + (new DataView(crypto.getRandomValues(new Uint8Array(4)).buffer).getUint32(0) % 1000)
	await page.getByPlaceholder('e.g., 0.0.0.0:7051').fill(`0.0.0.0:${listenPort}`)

	// Operations Address (random port in 9000-9999)
	const opsPort = 9000 + (new DataView(crypto.getRandomValues(new Uint8Array(4)).buffer).getUint32(0) % 1000)
	await page.getByPlaceholder('e.g., 0.0.0.0:9443').fill(`0.0.0.0:${opsPort}`)

	// External Endpoint (random port in 7000-7999)
	const extPort = 7000 + (new DataView(crypto.getRandomValues(new Uint8Array(4)).buffer).getUint32(0) % 1000)
	await page.getByPlaceholder('e.g., peer0.org1.example.com:7051').fill(`peer0.example.com:${extPort}`)

	// Go to Review step
	await page.getByRole('button', { name: /next/i }).click()

	// Step 6: Review and Submit
	await page.getByRole('button', { name: /create node/i }).click()

	// Wait for navigation to the node detail page or nodes list
	await expect(page.getByText(/General Information/i)).toBeVisible({ timeout: 60000 })
})
diff --git a/web/playwright/tests/create-node-fabric-peer.spec.ts b/web/playwright/tests/create-node-fabric-peer.spec.ts
new file mode 100644
index 0000000..04c2796
--- /dev/null
+++ b/web/playwright/tests/create-node-fabric-peer.spec.ts
@@ -0,0 +1,30 @@
+import { test, expect } from '@playwright/test'
+import { login } from './login'
+import { createOrganization } from './create-organization'
+import { createFabricNode } from './create-node-fabric-peer-logic'
+
+const FABRIC_NODE_CREATE_PATH = '/nodes/fabric/create'
+
+// Helper to generate unique values with cryptographically secure random numbers
+function uniqueSuffix() {
+ const bytes = new Uint8Array(4);
+ crypto.getRandomValues(bytes);
+ const randomNum = new DataView(bytes.buffer).getUint32(0) % 10000;
+ return `${Date.now()}-${randomNum}`;
+}
+
+test('can login, create an organization, and create a Fabric node', async ({ page, baseURL }) => {
+ await login(page, baseURL ?? '')
+
+ // Create a unique organization
+ const UNIQUE_SUFFIX = uniqueSuffix()
+ const mspId = `test-msp-${UNIQUE_SUFFIX}`
+ const description = `Test organization created by Playwright ${UNIQUE_SUFFIX}`
+ await createOrganization(page, baseURL, { mspId, description })
+
+ // Use the helper to create a Fabric node
+ const nodeName = await createFabricNode(page, baseURL ?? '', mspId)
+
+ // Optionally, assert the node name is visible or other post-creation checks
+ await expect(page.getByText(/General Information/i)).toBeVisible({ timeout: 60000 })
+})
diff --git a/web/playwright/tests/create-organization.spec.ts b/web/playwright/tests/create-organization.spec.ts
new file mode 100644
index 0000000..7574813
--- /dev/null
+++ b/web/playwright/tests/create-organization.spec.ts
@@ -0,0 +1,16 @@
+import { test } from '@playwright/test'
+import { createOrganization } from './create-organization'
+import { login } from './login'
+
+test('can login and create a new organization', async ({ page, baseURL }) => {
+ await login(page, baseURL ?? '')
+
+ // Use a unique suffix with cryptographically secure random values
+ const bytes = new Uint8Array(4)
+ crypto.getRandomValues(bytes)
+ const randomNum = new DataView(bytes.buffer).getUint32(0) % 10000
+ const UNIQUE_SUFFIX = `${Date.now()}-${randomNum}`
+ const mspId = `test-msp-${UNIQUE_SUFFIX}`
+ const description = `Test organization created by Playwright ${UNIQUE_SUFFIX}`
+ await createOrganization(page, baseURL, { mspId, description })
+})
diff --git a/web/playwright/tests/create-organization.ts b/web/playwright/tests/create-organization.ts
new file mode 100644
index 0000000..df47e67
--- /dev/null
+++ b/web/playwright/tests/create-organization.ts
@@ -0,0 +1,35 @@
+import { expect } from '@playwright/test'
+
const ORGANIZATIONS_PATH = '/fabric/organizations'

/**
 * Creates a Fabric organization through the UI and asserts it appears in the
 * organizations list afterwards.
 *
 * NOTE(review): parameters are untyped — `page` is a Playwright Page and
 * `baseURL` a string; callers should pass `baseURL ?? ''`, not undefined.
 *
 * @param page Playwright page instance
 * @param baseURL Base URL of the app
 * @param opts.mspId MSP ID to enter in the form
 * @param opts.description Organization description to enter
 * @param opts.providerIndex Index of the key-provider option to pick (default 0)
 */
export async function createOrganization(page, baseURL, { mspId, description, providerIndex = 0 }) {
	// 1. Go to organizations page
	await page.goto(baseURL + ORGANIZATIONS_PATH)
	await expect(page.getByRole('heading', { name: 'Organizations' })).toBeVisible({ timeout: 10000 })

	// 2. Open the create organization dialog
	await page.getByRole('button', { name: /add organization/i }).click()

	// 3. Fill in the form
	await page.getByPlaceholder('Enter MSP ID').fill(mspId)
	await page.getByPlaceholder('Enter organization description').fill(description)

	// If provider select is present, select the specified option
	// (best-effort: visibility checks swallow errors so the flow also works
	// when the provider dropdown is absent).
	const providerSelect = page.getByRole('combobox', { name: /key provider/i })
	if (await providerSelect.isVisible().catch(() => false)) {
		await providerSelect.click()
		const option = page.locator('[role="option"]').nth(providerIndex)
		if (await option.isVisible().catch(() => false)) {
			await option.click()
		}
	}

	// 4. Submit the form
	await page.getByRole('button', { name: /create organization/i }).click()
	await expect(page.getByRole('dialog')).not.toBeVisible({ timeout: 10000 })

	// 5. Assert the new organization appears in the list
	await expect(page.getByText(mspId)).toBeVisible({ timeout: 10000 })
	await expect(page.getByText(description)).toBeVisible({ timeout: 10000 })
}
\ No newline at end of file
diff --git a/web/playwright/tests/create-user.spec.ts b/web/playwright/tests/create-user.spec.ts
new file mode 100644
index 0000000..65c532e
--- /dev/null
+++ b/web/playwright/tests/create-user.spec.ts
@@ -0,0 +1,63 @@
+import { test, expect } from '@playwright/test'
+import { login } from './login'
+
+// Helper to generate a unique username with cryptographically secure random values
+function uniqueUsername() {
+ const bytes = new Uint8Array(4);
+ crypto.getRandomValues(bytes);
+ const randomNum = new DataView(bytes.buffer).getUint32(0) % 10000;
+ return `testuser_${Date.now()}_${randomNum}`;
+}
+
const USER_MANAGEMENT_PATH = '/users'
const NODES_PATH = '/nodes'

// This test assumes the admin user is set in env vars for login
// and that the admin can create users

// Full user-lifecycle flow: create a viewer user as admin, log out via the
// account menu (with confirmation dialog), log back in as the new user, and
// verify the nodes list is reachable.
test('can create a user, logout, login as that user, and see nodes list', async ({ page, baseURL }) => {
	// Step 1: Login as admin
	await login(page, baseURL ?? '')

	// Step 2: Go to user management
	await page.goto((baseURL ?? '') + USER_MANAGEMENT_PATH)
	await expect(page.getByRole('heading', { name: /users/i })).toBeVisible()

	// Step 3: Open the create user dialog
	await page.getByRole('button', { name: /add user/i }).click()
	await expect(page.getByRole('dialog')).toBeVisible()

	// Step 4: Fill in the user creation form (role: viewer)
	const username = uniqueUsername()
	const password = 'TestPassword123!'
	await page.getByLabel('Username').fill(username)
	await page.getByLabel('Password').fill(password)
	await page.getByLabel('Role').click()
	await page.getByRole('option', { name: /viewer/i }).click()
	await page.getByRole('button', { name: /create user/i }).click()

	// Wait for dialog to close and user to appear in the list
	await expect(page.getByRole('dialog')).not.toBeVisible({ timeout: 10000 })
	await expect(page.getByText(username)).toBeVisible({ timeout: 10000 })

	// Step 5: Logout
	// Open account/profile menu and click logout (assuming a button or menu exists)
	await page.locator('#user-menu-trigger').click()
	await page.getByRole('menuitem', { name: /Log out/i }).click()

	// Confirm the logout confirmation dialog appears
	await expect(page.getByRole('alertdialog')).toBeVisible()
	// Click the confirm button (assuming it is labeled 'Log out' or similar)
	await page.getByRole('button', { name: /Log out/i }).click()

	// Step 6: Login as the new user
	await expect(page.getByPlaceholder('Enter your username')).toBeVisible()
	await expect(page.getByPlaceholder('Enter your password')).toBeVisible()
	await page.getByPlaceholder('Enter your username').fill(username)
	await page.getByPlaceholder('Enter your password').fill(password)
	await page.getByRole('button', { name: /sign in/i }).click()

	// Step 7: Verify nodes list is visible
	await expect(page).toHaveURL(/.*\/nodes$/, { timeout: 10000 })
	await expect(page.getByRole('heading', { name: /nodes/i })).toBeVisible({ timeout: 10000 })
})
\ No newline at end of file
diff --git a/web/playwright/tests/login.spec.ts b/web/playwright/tests/login.spec.ts
new file mode 100644
index 0000000..e4265e0
--- /dev/null
+++ b/web/playwright/tests/login.spec.ts
@@ -0,0 +1,6 @@
import { test, expect } from '@playwright/test'
import { login } from './login'

// Smoke test for the shared login helper; credentials come from
// PLAYWRIGHT_USER / PLAYWRIGHT_PASSWORD (see ./login).
test('can login with valid credentials', async ({ page, baseURL }) => {
	await login(page, baseURL ?? '')
})
diff --git a/web/playwright/tests/login.ts b/web/playwright/tests/login.ts
new file mode 100644
index 0000000..4c98085
--- /dev/null
+++ b/web/playwright/tests/login.ts
@@ -0,0 +1,21 @@
import { Page, expect } from '@playwright/test'

// Credentials come from the environment (set by the CI workflow).
// NOTE(review): if unset, login is attempted with empty strings and only
// fails later at the URL assertion — consider failing fast with a message.
const USERNAME = process.env.PLAYWRIGHT_USER
const PASSWORD = process.env.PLAYWRIGHT_PASSWORD
const LOGIN_PATH = '/login'

// Reusable login function: signs in via the login form and waits for the
// post-login /nodes page. The heading regex accepts both the populated list
// ("Nodes") and the empty state ("Create your first node").
export async function login(page: Page, baseURL: string) {
	await page.goto(baseURL + LOGIN_PATH)
	await expect(page.getByPlaceholder('Enter your username')).toBeVisible()
	await expect(page.getByPlaceholder('Enter your password')).toBeVisible()

	await page.getByPlaceholder('Enter your username').fill(USERNAME || '')
	await page.getByPlaceholder('Enter your password').fill(PASSWORD || '')
	const signInButton = page.getByRole('button', { name: /sign in/i })
	await signInButton.waitFor({ state: 'visible' })
	await signInButton.click()

	await expect(page).toHaveURL(/.*\/nodes$/, { timeout: 10000 })
	await expect(page.getByRole('heading', { name: /^(Nodes|Create your first node)$/ })).toBeVisible({ timeout: 10000 })
}
diff --git a/web/playwright/tests/node-creation-wizard-helper.ts b/web/playwright/tests/node-creation-wizard-helper.ts
new file mode 100644
index 0000000..489f1b3
--- /dev/null
+++ b/web/playwright/tests/node-creation-wizard-helper.ts
@@ -0,0 +1,97 @@
+import { Page, expect } from '@playwright/test'
+
// Parameters for creating a Fabric peer through the wizard.
export type FabricPeerParams = {
	protocol: 'Fabric'
	nodeType: 'Peer node'
	nodeName: string
	// Organization option label to select; first option is used when omitted.
	organization?: string
	// Deployment mode option (e.g. "Docker"); wizard default when omitted.
	mode?: string
	listenAddress: string
	operationsAddress: string
	externalEndpoint: string
}

// Parameters for creating a Fabric orderer through the wizard.
export type FabricOrdererParams = {
	protocol: 'Fabric'
	nodeType: 'Orderer node'
	nodeName: string
	organization?: string
	mode?: string
	listenAddress: string
	operationsAddress: string
	externalEndpoint: string
	// Add more Fabric Orderer-specific fields if needed
}

// Parameters for creating a Besu node through the wizard.
export type BesuNodeParams = {
	protocol: 'Besu'
	nodeType: 'Besu node'
	nodeName: string
	organization?: string
	mode?: string
	listenAddress: string
	// Optional for Besu; filled only when provided.
	operationsAddress?: string
	externalEndpoint?: string
	// Add more Besu-specific fields if needed
}

// Discriminated union (on `protocol`/`nodeType`) accepted by createNodeWithWizard.
export type NodeWizardParams = FabricPeerParams | FabricOrdererParams | BesuNodeParams
+
/**
 * Drives the NodeCreationWizard end-to-end: protocol -> node type ->
 * configuration -> review -> submit, then waits for the detail page.
 *
 * @param page Playwright page instance
 * @param baseURL Base URL of the app
 * @param params Protocol-specific wizard inputs (see NodeWizardParams)
 */
export async function createNodeWithWizard(page: Page, baseURL: string, params: NodeWizardParams) {
	await page.goto(baseURL + '/nodes/create')
	await expect(page.getByRole('heading', { name: /create node/i })).toBeVisible()

	// Step 1: Protocol
	await page.getByRole('button', { name: params.protocol }).click()
	await page.getByRole('button', { name: /next/i }).click()

	// Step 2: Node Type
	await page.getByRole('button', { name: params.nodeType }).click()
	await page.getByRole('button', { name: /next/i }).click()

	// Step 3: Configuration
	await page.getByPlaceholder('Enter node name').fill(params.nodeName)

	// Organization: select by label when given, otherwise take the first option.
	if (params.organization) {
		const orgSelect = page.getByRole('combobox', { name: /organization/i })
		await orgSelect.click()
		await page.getByRole('option', { name: params.organization }).click()
	} else {
		const orgSelect = page.getByRole('combobox', { name: /organization/i })
		await orgSelect.click()
		await page.getByRole('option').first().click()
	}

	// Mode (optional)
	if (params.mode) {
		const modeSelect = page.getByRole('combobox', { name: /mode/i })
		await modeSelect.click()
		await page.getByRole('option', { name: new RegExp(params.mode, 'i') }).click()
	}

	// Node-type specific fields
	if (params.protocol === 'Fabric') {
		// Both Peer and Orderer use these fields
		await page.getByPlaceholder('e.g., 0.0.0.0:7051').fill(params.listenAddress)
		await page.getByPlaceholder('e.g., 0.0.0.0:9443').fill(params.operationsAddress)
		await page.getByPlaceholder('e.g., peer0.org1.example.com:7051').fill(params.externalEndpoint)
	} else if (params.protocol === 'Besu') {
		// NOTE(review): Besu reuses the Fabric placeholder strings here —
		// confirm the Besu form actually renders these placeholders.
		await page.getByPlaceholder('e.g., 0.0.0.0:7051').fill(params.listenAddress)
		if (params.operationsAddress) {
			await page.getByPlaceholder('e.g., 0.0.0.0:9443').fill(params.operationsAddress)
		}
		if (params.externalEndpoint) {
			await page.getByPlaceholder('e.g., peer0.org1.example.com:7051').fill(params.externalEndpoint)
		}
	}

	// Go to Review step
	await page.getByRole('button', { name: /next/i }).click()

	// Step 4: Review and Submit
	await page.getByRole('button', { name: /create node/i }).click()

	// Wait for navigation to the node detail page or nodes list
	await expect(page.getByText(/General Information/i)).toBeVisible({ timeout: 60000 })
}
diff --git a/web/src/App.tsx b/web/src/App.tsx
index d92eeae..d73445a 100644
--- a/web/src/App.tsx
+++ b/web/src/App.tsx
@@ -53,6 +53,10 @@ import PluginsPage from './pages/plugins'
import PluginDetailPage from './pages/plugins/[name]'
import NewPluginPage from './pages/plugins/new'
import UsersPage from './pages/users'
+import AccountPage from './pages/account'
+import AuditLogsPage from '@/pages/settings/audit-logs'
+import AuditLogDetailPage from '@/pages/settings/audit-logs/[id]'
+import AnalyticsPage from './pages/platform/analytics'
const queryClient = new QueryClient({
defaultOptions: {
@@ -82,6 +86,7 @@ const App = () => {
} />
+ } />
} />
} />
} />
@@ -122,6 +127,9 @@ const App = () => {
} />
} />
} />
+ } />
+ } />
+ } />
} />
diff --git a/web/src/api/client/@tanstack/react-query.gen.ts b/web/src/api/client/@tanstack/react-query.gen.ts
index 707bc64..5cdd3c1 100644
--- a/web/src/api/client/@tanstack/react-query.gen.ts
+++ b/web/src/api/client/@tanstack/react-query.gen.ts
@@ -1,9 +1,9 @@
// This file is auto-generated by @hey-api/openapi-ts
import type { Options } from '@hey-api/client-fetch';
-import { queryOptions, type UseMutationOptions, type DefaultError, infiniteQueryOptions, type InfiniteData } from '@tanstack/react-query';
-import type { PostAuthChangePasswordData, PostAuthChangePasswordError, PostAuthChangePasswordResponse, PostAuthLoginData, PostAuthLoginError, PostAuthLoginResponse, PostAuthLogoutData, PostAuthLogoutError, PostAuthLogoutResponse, GetAuthMeData, GetBackupsData, PostBackupsData, PostBackupsError, PostBackupsResponse, GetBackupsSchedulesData, PostBackupsSchedulesData, PostBackupsSchedulesError, PostBackupsSchedulesResponse, DeleteBackupsSchedulesByIdData, DeleteBackupsSchedulesByIdError, GetBackupsSchedulesByIdData, PutBackupsSchedulesByIdData, PutBackupsSchedulesByIdError, PutBackupsSchedulesByIdResponse, PutBackupsSchedulesByIdDisableData, PutBackupsSchedulesByIdDisableError, PutBackupsSchedulesByIdDisableResponse, PutBackupsSchedulesByIdEnableData, PutBackupsSchedulesByIdEnableError, PutBackupsSchedulesByIdEnableResponse, GetBackupsTargetsData, PostBackupsTargetsData, PostBackupsTargetsError, PostBackupsTargetsResponse, DeleteBackupsTargetsByIdData, DeleteBackupsTargetsByIdError, GetBackupsTargetsByIdData, PutBackupsTargetsByIdData, PutBackupsTargetsByIdError, PutBackupsTargetsByIdResponse, DeleteBackupsByIdData, DeleteBackupsByIdError, GetBackupsByIdData, PostDummyData, PostDummyResponse, GetKeyProvidersData, PostKeyProvidersData, PostKeyProvidersError, PostKeyProvidersResponse, DeleteKeyProvidersByIdData, DeleteKeyProvidersByIdError, GetKeyProvidersByIdData, GetKeysData, GetKeysError, GetKeysResponse, PostKeysData, PostKeysError, PostKeysResponse, GetKeysAllData, GetKeysFilterData, GetKeysFilterError, GetKeysFilterResponse, DeleteKeysByIdData, DeleteKeysByIdError, GetKeysByIdData, PostKeysByKeyIdSignData, PostKeysByKeyIdSignError, PostKeysByKeyIdSignResponse, GetNetworksBesuData, GetNetworksBesuError, GetNetworksBesuResponse, PostNetworksBesuData, PostNetworksBesuError, PostNetworksBesuResponse, PostNetworksBesuImportData, PostNetworksBesuImportError, PostNetworksBesuImportResponse, DeleteNetworksBesuByIdData, DeleteNetworksBesuByIdError, 
GetNetworksBesuByIdData, GetNetworksFabricData, GetNetworksFabricError, GetNetworksFabricResponse, PostNetworksFabricData, PostNetworksFabricError, PostNetworksFabricResponse, GetNetworksFabricByNameByNameData, PostNetworksFabricImportData, PostNetworksFabricImportError, PostNetworksFabricImportResponse, PostNetworksFabricImportWithOrgData, PostNetworksFabricImportWithOrgError, PostNetworksFabricImportWithOrgResponse, DeleteNetworksFabricByIdData, DeleteNetworksFabricByIdError, GetNetworksFabricByIdData, PostNetworksFabricByIdAnchorPeersData, PostNetworksFabricByIdAnchorPeersError, PostNetworksFabricByIdAnchorPeersResponse, GetNetworksFabricByIdBlocksData, GetNetworksFabricByIdBlocksError, GetNetworksFabricByIdBlocksResponse, GetNetworksFabricByIdBlocksByBlockNumData, GetNetworksFabricByIdChannelConfigData, GetNetworksFabricByIdCurrentChannelConfigData, GetNetworksFabricByIdInfoData, GetNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesError, PostNetworksFabricByIdNodesResponse, DeleteNetworksFabricByIdOrderersByOrdererIdData, DeleteNetworksFabricByIdOrderersByOrdererIdError, DeleteNetworksFabricByIdOrderersByOrdererIdResponse, PostNetworksFabricByIdOrderersByOrdererIdJoinData, PostNetworksFabricByIdOrderersByOrdererIdJoinError, PostNetworksFabricByIdOrderersByOrdererIdJoinResponse, PostNetworksFabricByIdOrderersByOrdererIdUnjoinData, PostNetworksFabricByIdOrderersByOrdererIdUnjoinError, PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse, PostNetworksFabricByIdOrganizationCrlData, PostNetworksFabricByIdOrganizationCrlError, PostNetworksFabricByIdOrganizationCrlResponse, GetNetworksFabricByIdOrganizationsByOrgIdConfigData, DeleteNetworksFabricByIdPeersByPeerIdData, DeleteNetworksFabricByIdPeersByPeerIdError, DeleteNetworksFabricByIdPeersByPeerIdResponse, PostNetworksFabricByIdPeersByPeerIdJoinData, PostNetworksFabricByIdPeersByPeerIdJoinError, PostNetworksFabricByIdPeersByPeerIdJoinResponse, 
PostNetworksFabricByIdPeersByPeerIdUnjoinData, PostNetworksFabricByIdPeersByPeerIdUnjoinError, PostNetworksFabricByIdPeersByPeerIdUnjoinResponse, PostNetworksFabricByIdReloadBlockData, PostNetworksFabricByIdReloadBlockError, PostNetworksFabricByIdReloadBlockResponse, GetNetworksFabricByIdTransactionsByTxIdData, PostNetworksFabricByIdUpdateConfigData, PostNetworksFabricByIdUpdateConfigError, PostNetworksFabricByIdUpdateConfigResponse, GetNodesData, GetNodesError, GetNodesResponse, PostNodesData, PostNodesError, PostNodesResponse, GetNodesDefaultsBesuNodeData, GetNodesDefaultsFabricData, GetNodesDefaultsFabricOrdererData, GetNodesDefaultsFabricPeerData, GetNodesPlatformByPlatformData, GetNodesPlatformByPlatformError, GetNodesPlatformByPlatformResponse, DeleteNodesByIdData, DeleteNodesByIdError, GetNodesByIdData, PutNodesByIdData, PutNodesByIdError, PutNodesByIdResponse, PostNodesByIdCertificatesRenewData, PostNodesByIdCertificatesRenewError, PostNodesByIdCertificatesRenewResponse, GetNodesByIdChannelsData, GetNodesByIdEventsData, GetNodesByIdEventsError, GetNodesByIdEventsResponse, GetNodesByIdLogsData, PostNodesByIdRestartData, PostNodesByIdRestartError, PostNodesByIdRestartResponse, PostNodesByIdStartData, PostNodesByIdStartError, PostNodesByIdStartResponse, PostNodesByIdStopData, PostNodesByIdStopError, PostNodesByIdStopResponse, GetNotificationsProvidersData, PostNotificationsProvidersData, PostNotificationsProvidersError, PostNotificationsProvidersResponse, DeleteNotificationsProvidersByIdData, DeleteNotificationsProvidersByIdError, GetNotificationsProvidersByIdData, PutNotificationsProvidersByIdData, PutNotificationsProvidersByIdError, PutNotificationsProvidersByIdResponse, PostNotificationsProvidersByIdTestData, PostNotificationsProvidersByIdTestError, PostNotificationsProvidersByIdTestResponse, GetOrganizationsData, PostOrganizationsData, PostOrganizationsError, PostOrganizationsResponse, GetOrganizationsByMspidByMspidData, DeleteOrganizationsByIdData, 
DeleteOrganizationsByIdError, GetOrganizationsByIdData, PutOrganizationsByIdData, PutOrganizationsByIdError, PutOrganizationsByIdResponse, GetOrganizationsByIdCrlData, PostOrganizationsByIdCrlRevokePemData, PostOrganizationsByIdCrlRevokePemError, PostOrganizationsByIdCrlRevokePemResponse, DeleteOrganizationsByIdCrlRevokeSerialData, DeleteOrganizationsByIdCrlRevokeSerialError, DeleteOrganizationsByIdCrlRevokeSerialResponse, PostOrganizationsByIdCrlRevokeSerialData, PostOrganizationsByIdCrlRevokeSerialError, PostOrganizationsByIdCrlRevokeSerialResponse, GetOrganizationsByIdRevokedCertificatesData, GetPluginsData, PostPluginsData, PostPluginsError, PostPluginsResponse, DeletePluginsByNameData, DeletePluginsByNameError, GetPluginsByNameData, PutPluginsByNameData, PutPluginsByNameError, PutPluginsByNameResponse, PostPluginsByNameDeployData, PostPluginsByNameDeployError, GetPluginsByNameDeploymentStatusData, GetPluginsByNameServicesData, GetPluginsByNameStatusData, PostPluginsByNameStopData, PostPluginsByNameStopError, GetSettingsData, PostSettingsData, PostSettingsResponse, GetUsersData, PostUsersData, PostUsersError, PostUsersResponse, DeleteUsersByIdData, DeleteUsersByIdError, GetUsersByIdData, PutUsersByIdData, PutUsersByIdError, PutUsersByIdResponse, PutUsersByIdPasswordData, PutUsersByIdPasswordError, PutUsersByIdPasswordResponse, PutUsersByIdRoleData, PutUsersByIdRoleError, PutUsersByIdRoleResponse } from '../types.gen';
-import { postAuthChangePassword, postAuthLogin, postAuthLogout, getAuthMe, getBackups, postBackups, getBackupsSchedules, postBackupsSchedules, deleteBackupsSchedulesById, getBackupsSchedulesById, putBackupsSchedulesById, putBackupsSchedulesByIdDisable, putBackupsSchedulesByIdEnable, getBackupsTargets, postBackupsTargets, deleteBackupsTargetsById, getBackupsTargetsById, putBackupsTargetsById, deleteBackupsById, getBackupsById, postDummy, getKeyProviders, postKeyProviders, deleteKeyProvidersById, getKeyProvidersById, getKeys, postKeys, getKeysAll, getKeysFilter, deleteKeysById, getKeysById, postKeysByKeyIdSign, getNetworksBesu, postNetworksBesu, postNetworksBesuImport, deleteNetworksBesuById, getNetworksBesuById, getNetworksFabric, postNetworksFabric, getNetworksFabricByNameByName, postNetworksFabricImport, postNetworksFabricImportWithOrg, deleteNetworksFabricById, getNetworksFabricById, postNetworksFabricByIdAnchorPeers, getNetworksFabricByIdBlocks, getNetworksFabricByIdBlocksByBlockNum, getNetworksFabricByIdChannelConfig, getNetworksFabricByIdCurrentChannelConfig, getNetworksFabricByIdInfo, getNetworksFabricByIdNodes, postNetworksFabricByIdNodes, deleteNetworksFabricByIdOrderersByOrdererId, postNetworksFabricByIdOrderersByOrdererIdJoin, postNetworksFabricByIdOrderersByOrdererIdUnjoin, postNetworksFabricByIdOrganizationCrl, getNetworksFabricByIdOrganizationsByOrgIdConfig, deleteNetworksFabricByIdPeersByPeerId, postNetworksFabricByIdPeersByPeerIdJoin, postNetworksFabricByIdPeersByPeerIdUnjoin, postNetworksFabricByIdReloadBlock, getNetworksFabricByIdTransactionsByTxId, postNetworksFabricByIdUpdateConfig, getNodes, postNodes, getNodesDefaultsBesuNode, getNodesDefaultsFabric, getNodesDefaultsFabricOrderer, getNodesDefaultsFabricPeer, getNodesPlatformByPlatform, deleteNodesById, getNodesById, putNodesById, postNodesByIdCertificatesRenew, getNodesByIdChannels, getNodesByIdEvents, getNodesByIdLogs, postNodesByIdRestart, postNodesByIdStart, postNodesByIdStop, 
getNotificationsProviders, postNotificationsProviders, deleteNotificationsProvidersById, getNotificationsProvidersById, putNotificationsProvidersById, postNotificationsProvidersByIdTest, getOrganizations, postOrganizations, getOrganizationsByMspidByMspid, deleteOrganizationsById, getOrganizationsById, putOrganizationsById, getOrganizationsByIdCrl, postOrganizationsByIdCrlRevokePem, deleteOrganizationsByIdCrlRevokeSerial, postOrganizationsByIdCrlRevokeSerial, getOrganizationsByIdRevokedCertificates, getPlugins, postPlugins, deletePluginsByName, getPluginsByName, putPluginsByName, postPluginsByNameDeploy, getPluginsByNameDeploymentStatus, getPluginsByNameServices, getPluginsByNameStatus, postPluginsByNameStop, getSettings, postSettings, getUsers, postUsers, deleteUsersById, getUsersById, putUsersById, putUsersByIdPassword, putUsersByIdRole, client } from '../sdk.gen';
+import { queryOptions, type UseMutationOptions, infiniteQueryOptions, type InfiniteData, type DefaultError } from '@tanstack/react-query';
+import type { PostApiV1MetricsDeployData, PostApiV1MetricsDeployError, PostApiV1MetricsDeployResponse, GetApiV1MetricsNodeByIdData, GetApiV1MetricsNodeByIdLabelByLabelValuesData, PostApiV1MetricsNodeByIdQueryData, PostApiV1MetricsNodeByIdQueryError, PostApiV1MetricsNodeByIdQueryResponse, GetApiV1MetricsNodeByIdRangeData, GetApiV1MetricsNodeByIdRangeError, GetApiV1MetricsNodeByIdRangeResponse, PostApiV1MetricsReloadData, PostApiV1MetricsReloadError, PostApiV1MetricsReloadResponse, GetApiV1MetricsStatusData, GetAuditLogsData, GetAuditLogsError, GetAuditLogsResponse, GetAuditLogsByIdData, PostAuthChangePasswordData, PostAuthChangePasswordError, PostAuthChangePasswordResponse, PostAuthLoginData, PostAuthLoginError, PostAuthLoginResponse, PostAuthLogoutData, PostAuthLogoutError, PostAuthLogoutResponse, GetAuthMeData, GetBackupsData, PostBackupsData, PostBackupsError, PostBackupsResponse, GetBackupsSchedulesData, PostBackupsSchedulesData, PostBackupsSchedulesError, PostBackupsSchedulesResponse, DeleteBackupsSchedulesByIdData, DeleteBackupsSchedulesByIdError, GetBackupsSchedulesByIdData, PutBackupsSchedulesByIdData, PutBackupsSchedulesByIdError, PutBackupsSchedulesByIdResponse, PutBackupsSchedulesByIdDisableData, PutBackupsSchedulesByIdDisableError, PutBackupsSchedulesByIdDisableResponse, PutBackupsSchedulesByIdEnableData, PutBackupsSchedulesByIdEnableError, PutBackupsSchedulesByIdEnableResponse, GetBackupsTargetsData, PostBackupsTargetsData, PostBackupsTargetsError, PostBackupsTargetsResponse, DeleteBackupsTargetsByIdData, DeleteBackupsTargetsByIdError, GetBackupsTargetsByIdData, PutBackupsTargetsByIdData, PutBackupsTargetsByIdError, PutBackupsTargetsByIdResponse, DeleteBackupsByIdData, DeleteBackupsByIdError, GetBackupsByIdData, PostDummyData, PostDummyResponse, GetKeyProvidersData, PostKeyProvidersData, PostKeyProvidersError, PostKeyProvidersResponse, DeleteKeyProvidersByIdData, DeleteKeyProvidersByIdError, GetKeyProvidersByIdData, GetKeysData, GetKeysError, 
GetKeysResponse, PostKeysData, PostKeysError, PostKeysResponse, GetKeysAllData, GetKeysFilterData, GetKeysFilterError, GetKeysFilterResponse, DeleteKeysByIdData, DeleteKeysByIdError, GetKeysByIdData, PostKeysByKeyIdSignData, PostKeysByKeyIdSignError, PostKeysByKeyIdSignResponse, GetNetworksBesuData, GetNetworksBesuError, GetNetworksBesuResponse, PostNetworksBesuData, PostNetworksBesuError, PostNetworksBesuResponse, PostNetworksBesuImportData, PostNetworksBesuImportError, PostNetworksBesuImportResponse, DeleteNetworksBesuByIdData, DeleteNetworksBesuByIdError, GetNetworksBesuByIdData, GetNetworksFabricData, GetNetworksFabricError, GetNetworksFabricResponse, PostNetworksFabricData, PostNetworksFabricError, PostNetworksFabricResponse, GetNetworksFabricByNameByNameData, PostNetworksFabricImportData, PostNetworksFabricImportError, PostNetworksFabricImportResponse, PostNetworksFabricImportWithOrgData, PostNetworksFabricImportWithOrgError, PostNetworksFabricImportWithOrgResponse, DeleteNetworksFabricByIdData, DeleteNetworksFabricByIdError, GetNetworksFabricByIdData, PostNetworksFabricByIdAnchorPeersData, PostNetworksFabricByIdAnchorPeersError, PostNetworksFabricByIdAnchorPeersResponse, GetNetworksFabricByIdBlocksData, GetNetworksFabricByIdBlocksError, GetNetworksFabricByIdBlocksResponse, GetNetworksFabricByIdBlocksByBlockNumData, GetNetworksFabricByIdChannelConfigData, GetNetworksFabricByIdCurrentChannelConfigData, GetNetworksFabricByIdInfoData, GetNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesError, PostNetworksFabricByIdNodesResponse, DeleteNetworksFabricByIdOrderersByOrdererIdData, DeleteNetworksFabricByIdOrderersByOrdererIdError, DeleteNetworksFabricByIdOrderersByOrdererIdResponse, PostNetworksFabricByIdOrderersByOrdererIdJoinData, PostNetworksFabricByIdOrderersByOrdererIdJoinError, PostNetworksFabricByIdOrderersByOrdererIdJoinResponse, PostNetworksFabricByIdOrderersByOrdererIdUnjoinData, 
PostNetworksFabricByIdOrderersByOrdererIdUnjoinError, PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse, PostNetworksFabricByIdOrganizationCrlData, PostNetworksFabricByIdOrganizationCrlError, PostNetworksFabricByIdOrganizationCrlResponse, GetNetworksFabricByIdOrganizationsByOrgIdConfigData, DeleteNetworksFabricByIdPeersByPeerIdData, DeleteNetworksFabricByIdPeersByPeerIdError, DeleteNetworksFabricByIdPeersByPeerIdResponse, PostNetworksFabricByIdPeersByPeerIdJoinData, PostNetworksFabricByIdPeersByPeerIdJoinError, PostNetworksFabricByIdPeersByPeerIdJoinResponse, PostNetworksFabricByIdPeersByPeerIdUnjoinData, PostNetworksFabricByIdPeersByPeerIdUnjoinError, PostNetworksFabricByIdPeersByPeerIdUnjoinResponse, PostNetworksFabricByIdReloadBlockData, PostNetworksFabricByIdReloadBlockError, PostNetworksFabricByIdReloadBlockResponse, GetNetworksFabricByIdTransactionsByTxIdData, PostNetworksFabricByIdUpdateConfigData, PostNetworksFabricByIdUpdateConfigError, PostNetworksFabricByIdUpdateConfigResponse, GetNodesData, GetNodesError, GetNodesResponse, PostNodesData, PostNodesError, PostNodesResponse, GetNodesDefaultsBesuNodeData, GetNodesDefaultsFabricData, GetNodesDefaultsFabricOrdererData, GetNodesDefaultsFabricPeerData, GetNodesPlatformByPlatformData, GetNodesPlatformByPlatformError, GetNodesPlatformByPlatformResponse, DeleteNodesByIdData, DeleteNodesByIdError, GetNodesByIdData, PutNodesByIdData, PutNodesByIdError, PutNodesByIdResponse, PostNodesByIdCertificatesRenewData, PostNodesByIdCertificatesRenewError, PostNodesByIdCertificatesRenewResponse, GetNodesByIdChannelsData, GetNodesByIdChannelsByChannelIdChaincodesData, GetNodesByIdEventsData, GetNodesByIdEventsError, GetNodesByIdEventsResponse, GetNodesByIdLogsData, PostNodesByIdRestartData, PostNodesByIdRestartError, PostNodesByIdRestartResponse, PostNodesByIdStartData, PostNodesByIdStartError, PostNodesByIdStartResponse, PostNodesByIdStopData, PostNodesByIdStopError, PostNodesByIdStopResponse, 
GetNotificationsProvidersData, PostNotificationsProvidersData, PostNotificationsProvidersError, PostNotificationsProvidersResponse, DeleteNotificationsProvidersByIdData, DeleteNotificationsProvidersByIdError, GetNotificationsProvidersByIdData, PutNotificationsProvidersByIdData, PutNotificationsProvidersByIdError, PutNotificationsProvidersByIdResponse, PostNotificationsProvidersByIdTestData, PostNotificationsProvidersByIdTestError, PostNotificationsProvidersByIdTestResponse, GetOrganizationsData, GetOrganizationsError, GetOrganizationsResponse, PostOrganizationsData, PostOrganizationsError, PostOrganizationsResponse, GetOrganizationsByMspidByMspidData, DeleteOrganizationsByIdData, DeleteOrganizationsByIdError, GetOrganizationsByIdData, PutOrganizationsByIdData, PutOrganizationsByIdError, PutOrganizationsByIdResponse, GetOrganizationsByIdCrlData, PostOrganizationsByIdCrlRevokePemData, PostOrganizationsByIdCrlRevokePemError, PostOrganizationsByIdCrlRevokePemResponse, DeleteOrganizationsByIdCrlRevokeSerialData, DeleteOrganizationsByIdCrlRevokeSerialError, DeleteOrganizationsByIdCrlRevokeSerialResponse, PostOrganizationsByIdCrlRevokeSerialData, PostOrganizationsByIdCrlRevokeSerialError, PostOrganizationsByIdCrlRevokeSerialResponse, GetOrganizationsByIdRevokedCertificatesData, GetPluginsData, PostPluginsData, PostPluginsError, PostPluginsResponse, DeletePluginsByNameData, DeletePluginsByNameError, GetPluginsByNameData, PutPluginsByNameData, PutPluginsByNameError, PutPluginsByNameResponse, PostPluginsByNameDeployData, PostPluginsByNameDeployError, GetPluginsByNameDeploymentStatusData, PostPluginsByNameResumeData, PostPluginsByNameResumeError, PostPluginsByNameResumeResponse, GetPluginsByNameServicesData, GetPluginsByNameStatusData, PostPluginsByNameStopData, PostPluginsByNameStopError, GetSettingsData, PostSettingsData, PostSettingsResponse, GetUsersData, PostUsersData, PostUsersError, PostUsersResponse, DeleteUsersByIdData, DeleteUsersByIdError, GetUsersByIdData, 
PutUsersByIdData, PutUsersByIdError, PutUsersByIdResponse, PutUsersByIdPasswordData, PutUsersByIdPasswordError, PutUsersByIdPasswordResponse, PutUsersByIdRoleData, PutUsersByIdRoleError, PutUsersByIdRoleResponse } from '../types.gen';
+import { postApiV1MetricsDeploy, getApiV1MetricsNodeById, getApiV1MetricsNodeByIdLabelByLabelValues, postApiV1MetricsNodeByIdQuery, getApiV1MetricsNodeByIdRange, postApiV1MetricsReload, getApiV1MetricsStatus, getAuditLogs, getAuditLogsById, postAuthChangePassword, postAuthLogin, postAuthLogout, getAuthMe, getBackups, postBackups, getBackupsSchedules, postBackupsSchedules, deleteBackupsSchedulesById, getBackupsSchedulesById, putBackupsSchedulesById, putBackupsSchedulesByIdDisable, putBackupsSchedulesByIdEnable, getBackupsTargets, postBackupsTargets, deleteBackupsTargetsById, getBackupsTargetsById, putBackupsTargetsById, deleteBackupsById, getBackupsById, postDummy, getKeyProviders, postKeyProviders, deleteKeyProvidersById, getKeyProvidersById, getKeys, postKeys, getKeysAll, getKeysFilter, deleteKeysById, getKeysById, postKeysByKeyIdSign, getNetworksBesu, postNetworksBesu, postNetworksBesuImport, deleteNetworksBesuById, getNetworksBesuById, getNetworksFabric, postNetworksFabric, getNetworksFabricByNameByName, postNetworksFabricImport, postNetworksFabricImportWithOrg, deleteNetworksFabricById, getNetworksFabricById, postNetworksFabricByIdAnchorPeers, getNetworksFabricByIdBlocks, getNetworksFabricByIdBlocksByBlockNum, getNetworksFabricByIdChannelConfig, getNetworksFabricByIdCurrentChannelConfig, getNetworksFabricByIdInfo, getNetworksFabricByIdNodes, postNetworksFabricByIdNodes, deleteNetworksFabricByIdOrderersByOrdererId, postNetworksFabricByIdOrderersByOrdererIdJoin, postNetworksFabricByIdOrderersByOrdererIdUnjoin, postNetworksFabricByIdOrganizationCrl, getNetworksFabricByIdOrganizationsByOrgIdConfig, deleteNetworksFabricByIdPeersByPeerId, postNetworksFabricByIdPeersByPeerIdJoin, postNetworksFabricByIdPeersByPeerIdUnjoin, postNetworksFabricByIdReloadBlock, getNetworksFabricByIdTransactionsByTxId, postNetworksFabricByIdUpdateConfig, getNodes, postNodes, getNodesDefaultsBesuNode, getNodesDefaultsFabric, getNodesDefaultsFabricOrderer, getNodesDefaultsFabricPeer, 
getNodesPlatformByPlatform, deleteNodesById, getNodesById, putNodesById, postNodesByIdCertificatesRenew, getNodesByIdChannels, getNodesByIdChannelsByChannelIdChaincodes, getNodesByIdEvents, getNodesByIdLogs, postNodesByIdRestart, postNodesByIdStart, postNodesByIdStop, getNotificationsProviders, postNotificationsProviders, deleteNotificationsProvidersById, getNotificationsProvidersById, putNotificationsProvidersById, postNotificationsProvidersByIdTest, getOrganizations, postOrganizations, getOrganizationsByMspidByMspid, deleteOrganizationsById, getOrganizationsById, putOrganizationsById, getOrganizationsByIdCrl, postOrganizationsByIdCrlRevokePem, deleteOrganizationsByIdCrlRevokeSerial, postOrganizationsByIdCrlRevokeSerial, getOrganizationsByIdRevokedCertificates, getPlugins, postPlugins, deletePluginsByName, getPluginsByName, putPluginsByName, postPluginsByNameDeploy, getPluginsByNameDeploymentStatus, postPluginsByNameResume, getPluginsByNameServices, getPluginsByNameStatus, postPluginsByNameStop, getSettings, postSettings, getUsers, postUsers, deleteUsersById, getUsersById, putUsersById, putUsersByIdPassword, putUsersByIdRole, client } from '../sdk.gen';
type QueryKey = [
Pick & {
@@ -32,6 +32,332 @@ const createQueryKey = (id: string, options?: TOptions
return params;
};
+export const postApiV1MetricsDeployQueryKey = (options: Options) => [
+ createQueryKey('postApiV1MetricsDeploy', options)
+];
+
+export const postApiV1MetricsDeployOptions = (options: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await postApiV1MetricsDeploy({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: postApiV1MetricsDeployQueryKey(options)
+ });
+};
+
+export const postApiV1MetricsDeployMutation = (options?: Partial>) => {
+ const mutationOptions: UseMutationOptions> = {
+ mutationFn: async (localOptions) => {
+ const { data } = await postApiV1MetricsDeploy({
+ ...options,
+ ...localOptions,
+ throwOnError: true
+ });
+ return data;
+ }
+ };
+ return mutationOptions;
+};
+
+export const getApiV1MetricsNodeByIdQueryKey = (options: Options) => [
+ createQueryKey('getApiV1MetricsNodeById', options)
+];
+
+export const getApiV1MetricsNodeByIdOptions = (options: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await getApiV1MetricsNodeById({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getApiV1MetricsNodeByIdQueryKey(options)
+ });
+};
+
+export const getApiV1MetricsNodeByIdLabelByLabelValuesQueryKey = (options: Options) => [
+ createQueryKey('getApiV1MetricsNodeByIdLabelByLabelValues', options)
+];
+
+export const getApiV1MetricsNodeByIdLabelByLabelValuesOptions = (options: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await getApiV1MetricsNodeByIdLabelByLabelValues({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getApiV1MetricsNodeByIdLabelByLabelValuesQueryKey(options)
+ });
+};
+
+export const postApiV1MetricsNodeByIdQueryQueryKey = (options: Options) => [
+ createQueryKey('postApiV1MetricsNodeByIdQuery', options)
+];
+
+export const postApiV1MetricsNodeByIdQueryOptions = (options: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await postApiV1MetricsNodeByIdQuery({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: postApiV1MetricsNodeByIdQueryQueryKey(options)
+ });
+};
+
+const createInfiniteParams = [0], 'body' | 'headers' | 'path' | 'query'>>(queryKey: QueryKey, page: K) => {
+ const params = queryKey[0];
+ if (page.body) {
+ params.body = {
+ ...queryKey[0].body as any,
+ ...page.body as any
+ };
+ }
+ if (page.headers) {
+ params.headers = {
+ ...queryKey[0].headers,
+ ...page.headers
+ };
+ }
+ if (page.path) {
+ params.path = {
+ ...queryKey[0].path as any,
+ ...page.path as any
+ };
+ }
+ if (page.query) {
+ params.query = {
+ ...queryKey[0].query as any,
+ ...page.query as any
+ };
+ }
+ return params as unknown as typeof page;
+};
+
+export const postApiV1MetricsNodeByIdQueryInfiniteQueryKey = (options: Options): QueryKey> => [
+ createQueryKey('postApiV1MetricsNodeByIdQuery', options, true)
+];
+
+export const postApiV1MetricsNodeByIdQueryInfiniteOptions = (options: Options) => {
+ return infiniteQueryOptions, QueryKey>, string | Pick>[0], 'body' | 'headers' | 'path' | 'query'>>(
+ // @ts-ignore
+ {
+ queryFn: async ({ pageParam, queryKey, signal }) => {
+ // @ts-ignore
+ const page: Pick>[0], 'body' | 'headers' | 'path' | 'query'> = typeof pageParam === 'object' ? pageParam : {
+ body: {
+ start: pageParam
+ }
+ };
+ const params = createInfiniteParams(queryKey, page);
+ const { data } = await postApiV1MetricsNodeByIdQuery({
+ ...options,
+ ...params,
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: postApiV1MetricsNodeByIdQueryInfiniteQueryKey(options)
+ });
+};
+
+export const postApiV1MetricsNodeByIdQueryMutation = (options?: Partial>) => {
+ const mutationOptions: UseMutationOptions> = {
+ mutationFn: async (localOptions) => {
+ const { data } = await postApiV1MetricsNodeByIdQuery({
+ ...options,
+ ...localOptions,
+ throwOnError: true
+ });
+ return data;
+ }
+ };
+ return mutationOptions;
+};
+
+export const getApiV1MetricsNodeByIdRangeQueryKey = (options: Options) => [
+ createQueryKey('getApiV1MetricsNodeByIdRange', options)
+];
+
+export const getApiV1MetricsNodeByIdRangeOptions = (options: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await getApiV1MetricsNodeByIdRange({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getApiV1MetricsNodeByIdRangeQueryKey(options)
+ });
+};
+
+export const getApiV1MetricsNodeByIdRangeInfiniteQueryKey = (options: Options): QueryKey> => [
+ createQueryKey('getApiV1MetricsNodeByIdRange', options, true)
+];
+
+export const getApiV1MetricsNodeByIdRangeInfiniteOptions = (options: Options) => {
+ return infiniteQueryOptions, QueryKey>, string | Pick>[0], 'body' | 'headers' | 'path' | 'query'>>(
+ // @ts-ignore
+ {
+ queryFn: async ({ pageParam, queryKey, signal }) => {
+ // @ts-ignore
+ const page: Pick>[0], 'body' | 'headers' | 'path' | 'query'> = typeof pageParam === 'object' ? pageParam : {
+ query: {
+ start: pageParam
+ }
+ };
+ const params = createInfiniteParams(queryKey, page);
+ const { data } = await getApiV1MetricsNodeByIdRange({
+ ...options,
+ ...params,
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getApiV1MetricsNodeByIdRangeInfiniteQueryKey(options)
+ });
+};
+
+export const postApiV1MetricsReloadQueryKey = (options?: Options) => [
+ createQueryKey('postApiV1MetricsReload', options)
+];
+
+export const postApiV1MetricsReloadOptions = (options?: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await postApiV1MetricsReload({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: postApiV1MetricsReloadQueryKey(options)
+ });
+};
+
+export const postApiV1MetricsReloadMutation = (options?: Partial>) => {
+ const mutationOptions: UseMutationOptions> = {
+ mutationFn: async (localOptions) => {
+ const { data } = await postApiV1MetricsReload({
+ ...options,
+ ...localOptions,
+ throwOnError: true
+ });
+ return data;
+ }
+ };
+ return mutationOptions;
+};
+
+export const getApiV1MetricsStatusQueryKey = (options?: Options) => [
+ createQueryKey('getApiV1MetricsStatus', options)
+];
+
+export const getApiV1MetricsStatusOptions = (options?: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await getApiV1MetricsStatus({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getApiV1MetricsStatusQueryKey(options)
+ });
+};
+
+export const getAuditLogsQueryKey = (options?: Options) => [
+ createQueryKey('getAuditLogs', options)
+];
+
+export const getAuditLogsOptions = (options?: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await getAuditLogs({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getAuditLogsQueryKey(options)
+ });
+};
+
+export const getAuditLogsInfiniteQueryKey = (options?: Options): QueryKey> => [
+ createQueryKey('getAuditLogs', options, true)
+];
+
+export const getAuditLogsInfiniteOptions = (options?: Options) => {
+ return infiniteQueryOptions, QueryKey>, number | Pick>[0], 'body' | 'headers' | 'path' | 'query'>>(
+ // @ts-ignore
+ {
+ queryFn: async ({ pageParam, queryKey, signal }) => {
+ // @ts-ignore
+ const page: Pick>[0], 'body' | 'headers' | 'path' | 'query'> = typeof pageParam === 'object' ? pageParam : {
+ query: {
+ page: pageParam
+ }
+ };
+ const params = createInfiniteParams(queryKey, page);
+ const { data } = await getAuditLogs({
+ ...options,
+ ...params,
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getAuditLogsInfiniteQueryKey(options)
+ });
+};
+
+export const getAuditLogsByIdQueryKey = (options: Options) => [
+ createQueryKey('getAuditLogsById', options)
+];
+
+export const getAuditLogsByIdOptions = (options: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await getAuditLogsById({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getAuditLogsByIdQueryKey(options)
+ });
+};
+
export const postAuthChangePasswordQueryKey = (options: Options) => [
createQueryKey('postAuthChangePassword', options)
];
@@ -598,35 +924,6 @@ export const getKeysOptions = (options?: Options) => {
});
};
-const createInfiniteParams = [0], 'body' | 'headers' | 'path' | 'query'>>(queryKey: QueryKey, page: K) => {
- const params = queryKey[0];
- if (page.body) {
- params.body = {
- ...queryKey[0].body as any,
- ...page.body as any
- };
- }
- if (page.headers) {
- params.headers = {
- ...queryKey[0].headers,
- ...page.headers
- };
- }
- if (page.path) {
- params.path = {
- ...queryKey[0].path as any,
- ...page.path as any
- };
- }
- if (page.query) {
- params.query = {
- ...queryKey[0].query as any,
- ...page.query as any
- };
- }
- return params as unknown as typeof page;
-};
-
export const getKeysInfiniteQueryKey = (options?: Options): QueryKey> => [
createQueryKey('getKeys', options, true)
];
@@ -1971,6 +2268,25 @@ export const getNodesByIdChannelsOptions = (options: Options) => [
+ createQueryKey('getNodesByIdChannelsByChannelIdChaincodes', options)
+];
+
+export const getNodesByIdChannelsByChannelIdChaincodesOptions = (options: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await getNodesByIdChannelsByChannelIdChaincodes({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getNodesByIdChannelsByChannelIdChaincodesQueryKey(options)
+ });
+};
+
export const getNodesByIdEventsQueryKey = (options: Options) => [
createQueryKey('getNodesByIdEvents', options)
];
@@ -2287,6 +2603,34 @@ export const getOrganizationsOptions = (options?: Options)
});
};
+export const getOrganizationsInfiniteQueryKey = (options?: Options): QueryKey> => [
+ createQueryKey('getOrganizations', options, true)
+];
+
+export const getOrganizationsInfiniteOptions = (options?: Options) => {
+ return infiniteQueryOptions, QueryKey>, number | Pick>[0], 'body' | 'headers' | 'path' | 'query'>>(
+ // @ts-ignore
+ {
+ queryFn: async ({ pageParam, queryKey, signal }) => {
+ // @ts-ignore
+ const page: Pick>[0], 'body' | 'headers' | 'path' | 'query'> = typeof pageParam === 'object' ? pageParam : {
+ query: {
+ offset: pageParam
+ }
+ };
+ const params = createInfiniteParams(queryKey, page);
+ const { data } = await getOrganizations({
+ ...options,
+ ...params,
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: getOrganizationsInfiniteQueryKey(options)
+ });
+};
+
export const postOrganizationsQueryKey = (options: Options) => [
createQueryKey('postOrganizations', options)
];
@@ -2655,6 +2999,39 @@ export const getPluginsByNameDeploymentStatusOptions = (options: Options) => [
+ createQueryKey('postPluginsByNameResume', options)
+];
+
+export const postPluginsByNameResumeOptions = (options: Options) => {
+ return queryOptions({
+ queryFn: async ({ queryKey, signal }) => {
+ const { data } = await postPluginsByNameResume({
+ ...options,
+ ...queryKey[0],
+ signal,
+ throwOnError: true
+ });
+ return data;
+ },
+ queryKey: postPluginsByNameResumeQueryKey(options)
+ });
+};
+
+export const postPluginsByNameResumeMutation = (options?: Partial>) => {
+ const mutationOptions: UseMutationOptions> = {
+ mutationFn: async (localOptions) => {
+ const { data } = await postPluginsByNameResume({
+ ...options,
+ ...localOptions,
+ throwOnError: true
+ });
+ return data;
+ }
+ };
+ return mutationOptions;
+};
+
export const getPluginsByNameServicesQueryKey = (options: Options) => [
createQueryKey('getPluginsByNameServices', options)
];
diff --git a/web/src/api/client/sdk.gen.ts b/web/src/api/client/sdk.gen.ts
index 4363d56..bc7feec 100644
--- a/web/src/api/client/sdk.gen.ts
+++ b/web/src/api/client/sdk.gen.ts
@@ -1,10 +1,123 @@
// This file is auto-generated by @hey-api/openapi-ts
import { createClient, createConfig, type Options } from '@hey-api/client-fetch';
-import type { PostAuthChangePasswordData, PostAuthChangePasswordResponse, PostAuthChangePasswordError, PostAuthLoginData, PostAuthLoginResponse, PostAuthLoginError, PostAuthLogoutData, PostAuthLogoutResponse, PostAuthLogoutError, GetAuthMeData, GetAuthMeResponse, GetAuthMeError, GetBackupsData, GetBackupsResponse, GetBackupsError, PostBackupsData, PostBackupsResponse, PostBackupsError, GetBackupsSchedulesData, GetBackupsSchedulesResponse, GetBackupsSchedulesError, PostBackupsSchedulesData, PostBackupsSchedulesResponse, PostBackupsSchedulesError, DeleteBackupsSchedulesByIdData, DeleteBackupsSchedulesByIdError, GetBackupsSchedulesByIdData, GetBackupsSchedulesByIdResponse, GetBackupsSchedulesByIdError, PutBackupsSchedulesByIdData, PutBackupsSchedulesByIdResponse, PutBackupsSchedulesByIdError, PutBackupsSchedulesByIdDisableData, PutBackupsSchedulesByIdDisableResponse, PutBackupsSchedulesByIdDisableError, PutBackupsSchedulesByIdEnableData, PutBackupsSchedulesByIdEnableResponse, PutBackupsSchedulesByIdEnableError, GetBackupsTargetsData, GetBackupsTargetsResponse, GetBackupsTargetsError, PostBackupsTargetsData, PostBackupsTargetsResponse, PostBackupsTargetsError, DeleteBackupsTargetsByIdData, DeleteBackupsTargetsByIdError, GetBackupsTargetsByIdData, GetBackupsTargetsByIdResponse, GetBackupsTargetsByIdError, PutBackupsTargetsByIdData, PutBackupsTargetsByIdResponse, PutBackupsTargetsByIdError, DeleteBackupsByIdData, DeleteBackupsByIdError, GetBackupsByIdData, GetBackupsByIdResponse, GetBackupsByIdError, PostDummyData, PostDummyResponse, GetKeyProvidersData, GetKeyProvidersResponse, GetKeyProvidersError, PostKeyProvidersData, PostKeyProvidersResponse, PostKeyProvidersError, DeleteKeyProvidersByIdData, DeleteKeyProvidersByIdError, GetKeyProvidersByIdData, GetKeyProvidersByIdResponse, GetKeyProvidersByIdError, GetKeysData, GetKeysResponse, GetKeysError, PostKeysData, PostKeysResponse, PostKeysError, GetKeysAllData, GetKeysAllResponse, GetKeysAllError, GetKeysFilterData, 
GetKeysFilterResponse, GetKeysFilterError, DeleteKeysByIdData, DeleteKeysByIdError, GetKeysByIdData, GetKeysByIdResponse, GetKeysByIdError, PostKeysByKeyIdSignData, PostKeysByKeyIdSignResponse, PostKeysByKeyIdSignError, GetNetworksBesuData, GetNetworksBesuResponse, GetNetworksBesuError, PostNetworksBesuData, PostNetworksBesuResponse, PostNetworksBesuError, PostNetworksBesuImportData, PostNetworksBesuImportResponse, PostNetworksBesuImportError, DeleteNetworksBesuByIdData, DeleteNetworksBesuByIdError, GetNetworksBesuByIdData, GetNetworksBesuByIdResponse, GetNetworksBesuByIdError, GetNetworksFabricData, GetNetworksFabricResponse, GetNetworksFabricError, PostNetworksFabricData, PostNetworksFabricResponse, PostNetworksFabricError, GetNetworksFabricByNameByNameData, GetNetworksFabricByNameByNameResponse, GetNetworksFabricByNameByNameError, PostNetworksFabricImportData, PostNetworksFabricImportResponse, PostNetworksFabricImportError, PostNetworksFabricImportWithOrgData, PostNetworksFabricImportWithOrgResponse, PostNetworksFabricImportWithOrgError, DeleteNetworksFabricByIdData, DeleteNetworksFabricByIdError, GetNetworksFabricByIdData, GetNetworksFabricByIdResponse, GetNetworksFabricByIdError, PostNetworksFabricByIdAnchorPeersData, PostNetworksFabricByIdAnchorPeersResponse, PostNetworksFabricByIdAnchorPeersError, GetNetworksFabricByIdBlocksData, GetNetworksFabricByIdBlocksResponse, GetNetworksFabricByIdBlocksError, GetNetworksFabricByIdBlocksByBlockNumData, GetNetworksFabricByIdBlocksByBlockNumResponse, GetNetworksFabricByIdBlocksByBlockNumError, GetNetworksFabricByIdChannelConfigData, GetNetworksFabricByIdChannelConfigResponse, GetNetworksFabricByIdChannelConfigError, GetNetworksFabricByIdCurrentChannelConfigData, GetNetworksFabricByIdCurrentChannelConfigResponse, GetNetworksFabricByIdCurrentChannelConfigError, GetNetworksFabricByIdInfoData, GetNetworksFabricByIdInfoResponse, GetNetworksFabricByIdInfoError, GetNetworksFabricByIdNodesData, 
GetNetworksFabricByIdNodesResponse, GetNetworksFabricByIdNodesError, PostNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesResponse, PostNetworksFabricByIdNodesError, DeleteNetworksFabricByIdOrderersByOrdererIdData, DeleteNetworksFabricByIdOrderersByOrdererIdResponse, DeleteNetworksFabricByIdOrderersByOrdererIdError, PostNetworksFabricByIdOrderersByOrdererIdJoinData, PostNetworksFabricByIdOrderersByOrdererIdJoinResponse, PostNetworksFabricByIdOrderersByOrdererIdJoinError, PostNetworksFabricByIdOrderersByOrdererIdUnjoinData, PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse, PostNetworksFabricByIdOrderersByOrdererIdUnjoinError, PostNetworksFabricByIdOrganizationCrlData, PostNetworksFabricByIdOrganizationCrlResponse, PostNetworksFabricByIdOrganizationCrlError, GetNetworksFabricByIdOrganizationsByOrgIdConfigData, GetNetworksFabricByIdOrganizationsByOrgIdConfigResponse, GetNetworksFabricByIdOrganizationsByOrgIdConfigError, DeleteNetworksFabricByIdPeersByPeerIdData, DeleteNetworksFabricByIdPeersByPeerIdResponse, DeleteNetworksFabricByIdPeersByPeerIdError, PostNetworksFabricByIdPeersByPeerIdJoinData, PostNetworksFabricByIdPeersByPeerIdJoinResponse, PostNetworksFabricByIdPeersByPeerIdJoinError, PostNetworksFabricByIdPeersByPeerIdUnjoinData, PostNetworksFabricByIdPeersByPeerIdUnjoinResponse, PostNetworksFabricByIdPeersByPeerIdUnjoinError, PostNetworksFabricByIdReloadBlockData, PostNetworksFabricByIdReloadBlockResponse, PostNetworksFabricByIdReloadBlockError, GetNetworksFabricByIdTransactionsByTxIdData, GetNetworksFabricByIdTransactionsByTxIdResponse, GetNetworksFabricByIdTransactionsByTxIdError, PostNetworksFabricByIdUpdateConfigData, PostNetworksFabricByIdUpdateConfigResponse, PostNetworksFabricByIdUpdateConfigError, GetNodesData, GetNodesResponse, GetNodesError, PostNodesData, PostNodesResponse, PostNodesError, GetNodesDefaultsBesuNodeData, GetNodesDefaultsBesuNodeResponse, GetNodesDefaultsBesuNodeError, GetNodesDefaultsFabricData, 
GetNodesDefaultsFabricResponse, GetNodesDefaultsFabricError, GetNodesDefaultsFabricOrdererData, GetNodesDefaultsFabricOrdererResponse, GetNodesDefaultsFabricOrdererError, GetNodesDefaultsFabricPeerData, GetNodesDefaultsFabricPeerResponse, GetNodesDefaultsFabricPeerError, GetNodesPlatformByPlatformData, GetNodesPlatformByPlatformResponse, GetNodesPlatformByPlatformError, DeleteNodesByIdData, DeleteNodesByIdError, GetNodesByIdData, GetNodesByIdResponse, GetNodesByIdError, PutNodesByIdData, PutNodesByIdResponse, PutNodesByIdError, PostNodesByIdCertificatesRenewData, PostNodesByIdCertificatesRenewResponse, PostNodesByIdCertificatesRenewError, GetNodesByIdChannelsData, GetNodesByIdChannelsResponse, GetNodesByIdChannelsError, GetNodesByIdEventsData, GetNodesByIdEventsResponse, GetNodesByIdEventsError, GetNodesByIdLogsData, GetNodesByIdLogsResponse, GetNodesByIdLogsError, PostNodesByIdRestartData, PostNodesByIdRestartResponse, PostNodesByIdRestartError, PostNodesByIdStartData, PostNodesByIdStartResponse, PostNodesByIdStartError, PostNodesByIdStopData, PostNodesByIdStopResponse, PostNodesByIdStopError, GetNotificationsProvidersData, GetNotificationsProvidersResponse, GetNotificationsProvidersError, PostNotificationsProvidersData, PostNotificationsProvidersResponse, PostNotificationsProvidersError, DeleteNotificationsProvidersByIdData, DeleteNotificationsProvidersByIdError, GetNotificationsProvidersByIdData, GetNotificationsProvidersByIdResponse, GetNotificationsProvidersByIdError, PutNotificationsProvidersByIdData, PutNotificationsProvidersByIdResponse, PutNotificationsProvidersByIdError, PostNotificationsProvidersByIdTestData, PostNotificationsProvidersByIdTestResponse, PostNotificationsProvidersByIdTestError, GetOrganizationsData, GetOrganizationsResponse, GetOrganizationsError, PostOrganizationsData, PostOrganizationsResponse, PostOrganizationsError, GetOrganizationsByMspidByMspidData, GetOrganizationsByMspidByMspidResponse, GetOrganizationsByMspidByMspidError, 
DeleteOrganizationsByIdData, DeleteOrganizationsByIdError, GetOrganizationsByIdData, GetOrganizationsByIdResponse, GetOrganizationsByIdError, PutOrganizationsByIdData, PutOrganizationsByIdResponse, PutOrganizationsByIdError, GetOrganizationsByIdCrlData, GetOrganizationsByIdCrlResponse, GetOrganizationsByIdCrlError, PostOrganizationsByIdCrlRevokePemData, PostOrganizationsByIdCrlRevokePemResponse, PostOrganizationsByIdCrlRevokePemError, DeleteOrganizationsByIdCrlRevokeSerialData, DeleteOrganizationsByIdCrlRevokeSerialResponse, DeleteOrganizationsByIdCrlRevokeSerialError, PostOrganizationsByIdCrlRevokeSerialData, PostOrganizationsByIdCrlRevokeSerialResponse, PostOrganizationsByIdCrlRevokeSerialError, GetOrganizationsByIdRevokedCertificatesData, GetOrganizationsByIdRevokedCertificatesResponse, GetOrganizationsByIdRevokedCertificatesError, GetPluginsData, GetPluginsResponse, GetPluginsError, PostPluginsData, PostPluginsResponse, PostPluginsError, DeletePluginsByNameData, DeletePluginsByNameError, GetPluginsByNameData, GetPluginsByNameResponse, GetPluginsByNameError, PutPluginsByNameData, PutPluginsByNameResponse, PutPluginsByNameError, PostPluginsByNameDeployData, PostPluginsByNameDeployError, GetPluginsByNameDeploymentStatusData, GetPluginsByNameDeploymentStatusResponse, GetPluginsByNameDeploymentStatusError, GetPluginsByNameServicesData, GetPluginsByNameServicesResponse, GetPluginsByNameServicesError, GetPluginsByNameStatusData, GetPluginsByNameStatusResponse, GetPluginsByNameStatusError, PostPluginsByNameStopData, PostPluginsByNameStopError, GetSettingsData, GetSettingsResponse, PostSettingsData, PostSettingsResponse, GetUsersData, GetUsersResponse, GetUsersError, PostUsersData, PostUsersResponse, PostUsersError, DeleteUsersByIdData, DeleteUsersByIdError, GetUsersByIdData, GetUsersByIdResponse, GetUsersByIdError, PutUsersByIdData, PutUsersByIdResponse, PutUsersByIdError, PutUsersByIdPasswordData, PutUsersByIdPasswordResponse, PutUsersByIdPasswordError, 
PutUsersByIdRoleData, PutUsersByIdRoleResponse, PutUsersByIdRoleError } from './types.gen';
+import type { PostApiV1MetricsDeployData, PostApiV1MetricsDeployResponse, PostApiV1MetricsDeployError, GetApiV1MetricsNodeByIdData, GetApiV1MetricsNodeByIdResponse, GetApiV1MetricsNodeByIdError, GetApiV1MetricsNodeByIdLabelByLabelValuesData, GetApiV1MetricsNodeByIdLabelByLabelValuesResponse, GetApiV1MetricsNodeByIdLabelByLabelValuesError, PostApiV1MetricsNodeByIdQueryData, PostApiV1MetricsNodeByIdQueryResponse, PostApiV1MetricsNodeByIdQueryError, GetApiV1MetricsNodeByIdRangeData, GetApiV1MetricsNodeByIdRangeResponse, GetApiV1MetricsNodeByIdRangeError, PostApiV1MetricsReloadData, PostApiV1MetricsReloadResponse, PostApiV1MetricsReloadError, GetApiV1MetricsStatusData, GetApiV1MetricsStatusResponse, GetApiV1MetricsStatusError, GetAuditLogsData, GetAuditLogsResponse, GetAuditLogsError, GetAuditLogsByIdData, GetAuditLogsByIdResponse, GetAuditLogsByIdError, PostAuthChangePasswordData, PostAuthChangePasswordResponse, PostAuthChangePasswordError, PostAuthLoginData, PostAuthLoginResponse, PostAuthLoginError, PostAuthLogoutData, PostAuthLogoutResponse, PostAuthLogoutError, GetAuthMeData, GetAuthMeResponse, GetAuthMeError, GetBackupsData, GetBackupsResponse, GetBackupsError, PostBackupsData, PostBackupsResponse, PostBackupsError, GetBackupsSchedulesData, GetBackupsSchedulesResponse, GetBackupsSchedulesError, PostBackupsSchedulesData, PostBackupsSchedulesResponse, PostBackupsSchedulesError, DeleteBackupsSchedulesByIdData, DeleteBackupsSchedulesByIdError, GetBackupsSchedulesByIdData, GetBackupsSchedulesByIdResponse, GetBackupsSchedulesByIdError, PutBackupsSchedulesByIdData, PutBackupsSchedulesByIdResponse, PutBackupsSchedulesByIdError, PutBackupsSchedulesByIdDisableData, PutBackupsSchedulesByIdDisableResponse, PutBackupsSchedulesByIdDisableError, PutBackupsSchedulesByIdEnableData, PutBackupsSchedulesByIdEnableResponse, PutBackupsSchedulesByIdEnableError, GetBackupsTargetsData, GetBackupsTargetsResponse, GetBackupsTargetsError, PostBackupsTargetsData, PostBackupsTargetsResponse, 
PostBackupsTargetsError, DeleteBackupsTargetsByIdData, DeleteBackupsTargetsByIdError, GetBackupsTargetsByIdData, GetBackupsTargetsByIdResponse, GetBackupsTargetsByIdError, PutBackupsTargetsByIdData, PutBackupsTargetsByIdResponse, PutBackupsTargetsByIdError, DeleteBackupsByIdData, DeleteBackupsByIdError, GetBackupsByIdData, GetBackupsByIdResponse, GetBackupsByIdError, PostDummyData, PostDummyResponse, GetKeyProvidersData, GetKeyProvidersResponse, GetKeyProvidersError, PostKeyProvidersData, PostKeyProvidersResponse, PostKeyProvidersError, DeleteKeyProvidersByIdData, DeleteKeyProvidersByIdError, GetKeyProvidersByIdData, GetKeyProvidersByIdResponse, GetKeyProvidersByIdError, GetKeysData, GetKeysResponse, GetKeysError, PostKeysData, PostKeysResponse, PostKeysError, GetKeysAllData, GetKeysAllResponse, GetKeysAllError, GetKeysFilterData, GetKeysFilterResponse, GetKeysFilterError, DeleteKeysByIdData, DeleteKeysByIdError, GetKeysByIdData, GetKeysByIdResponse, GetKeysByIdError, PostKeysByKeyIdSignData, PostKeysByKeyIdSignResponse, PostKeysByKeyIdSignError, GetNetworksBesuData, GetNetworksBesuResponse, GetNetworksBesuError, PostNetworksBesuData, PostNetworksBesuResponse, PostNetworksBesuError, PostNetworksBesuImportData, PostNetworksBesuImportResponse, PostNetworksBesuImportError, DeleteNetworksBesuByIdData, DeleteNetworksBesuByIdError, GetNetworksBesuByIdData, GetNetworksBesuByIdResponse, GetNetworksBesuByIdError, GetNetworksFabricData, GetNetworksFabricResponse, GetNetworksFabricError, PostNetworksFabricData, PostNetworksFabricResponse, PostNetworksFabricError, GetNetworksFabricByNameByNameData, GetNetworksFabricByNameByNameResponse, GetNetworksFabricByNameByNameError, PostNetworksFabricImportData, PostNetworksFabricImportResponse, PostNetworksFabricImportError, PostNetworksFabricImportWithOrgData, PostNetworksFabricImportWithOrgResponse, PostNetworksFabricImportWithOrgError, DeleteNetworksFabricByIdData, DeleteNetworksFabricByIdError, GetNetworksFabricByIdData, 
GetNetworksFabricByIdResponse, GetNetworksFabricByIdError, PostNetworksFabricByIdAnchorPeersData, PostNetworksFabricByIdAnchorPeersResponse, PostNetworksFabricByIdAnchorPeersError, GetNetworksFabricByIdBlocksData, GetNetworksFabricByIdBlocksResponse, GetNetworksFabricByIdBlocksError, GetNetworksFabricByIdBlocksByBlockNumData, GetNetworksFabricByIdBlocksByBlockNumResponse, GetNetworksFabricByIdBlocksByBlockNumError, GetNetworksFabricByIdChannelConfigData, GetNetworksFabricByIdChannelConfigResponse, GetNetworksFabricByIdChannelConfigError, GetNetworksFabricByIdCurrentChannelConfigData, GetNetworksFabricByIdCurrentChannelConfigResponse, GetNetworksFabricByIdCurrentChannelConfigError, GetNetworksFabricByIdInfoData, GetNetworksFabricByIdInfoResponse, GetNetworksFabricByIdInfoError, GetNetworksFabricByIdNodesData, GetNetworksFabricByIdNodesResponse, GetNetworksFabricByIdNodesError, PostNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesResponse, PostNetworksFabricByIdNodesError, DeleteNetworksFabricByIdOrderersByOrdererIdData, DeleteNetworksFabricByIdOrderersByOrdererIdResponse, DeleteNetworksFabricByIdOrderersByOrdererIdError, PostNetworksFabricByIdOrderersByOrdererIdJoinData, PostNetworksFabricByIdOrderersByOrdererIdJoinResponse, PostNetworksFabricByIdOrderersByOrdererIdJoinError, PostNetworksFabricByIdOrderersByOrdererIdUnjoinData, PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse, PostNetworksFabricByIdOrderersByOrdererIdUnjoinError, PostNetworksFabricByIdOrganizationCrlData, PostNetworksFabricByIdOrganizationCrlResponse, PostNetworksFabricByIdOrganizationCrlError, GetNetworksFabricByIdOrganizationsByOrgIdConfigData, GetNetworksFabricByIdOrganizationsByOrgIdConfigResponse, GetNetworksFabricByIdOrganizationsByOrgIdConfigError, DeleteNetworksFabricByIdPeersByPeerIdData, DeleteNetworksFabricByIdPeersByPeerIdResponse, DeleteNetworksFabricByIdPeersByPeerIdError, PostNetworksFabricByIdPeersByPeerIdJoinData, PostNetworksFabricByIdPeersByPeerIdJoinResponse, 
PostNetworksFabricByIdPeersByPeerIdJoinError, PostNetworksFabricByIdPeersByPeerIdUnjoinData, PostNetworksFabricByIdPeersByPeerIdUnjoinResponse, PostNetworksFabricByIdPeersByPeerIdUnjoinError, PostNetworksFabricByIdReloadBlockData, PostNetworksFabricByIdReloadBlockResponse, PostNetworksFabricByIdReloadBlockError, GetNetworksFabricByIdTransactionsByTxIdData, GetNetworksFabricByIdTransactionsByTxIdResponse, GetNetworksFabricByIdTransactionsByTxIdError, PostNetworksFabricByIdUpdateConfigData, PostNetworksFabricByIdUpdateConfigResponse, PostNetworksFabricByIdUpdateConfigError, GetNodesData, GetNodesResponse, GetNodesError, PostNodesData, PostNodesResponse, PostNodesError, GetNodesDefaultsBesuNodeData, GetNodesDefaultsBesuNodeResponse, GetNodesDefaultsBesuNodeError, GetNodesDefaultsFabricData, GetNodesDefaultsFabricResponse, GetNodesDefaultsFabricError, GetNodesDefaultsFabricOrdererData, GetNodesDefaultsFabricOrdererResponse, GetNodesDefaultsFabricOrdererError, GetNodesDefaultsFabricPeerData, GetNodesDefaultsFabricPeerResponse, GetNodesDefaultsFabricPeerError, GetNodesPlatformByPlatformData, GetNodesPlatformByPlatformResponse, GetNodesPlatformByPlatformError, DeleteNodesByIdData, DeleteNodesByIdError, GetNodesByIdData, GetNodesByIdResponse, GetNodesByIdError, PutNodesByIdData, PutNodesByIdResponse, PutNodesByIdError, PostNodesByIdCertificatesRenewData, PostNodesByIdCertificatesRenewResponse, PostNodesByIdCertificatesRenewError, GetNodesByIdChannelsData, GetNodesByIdChannelsResponse, GetNodesByIdChannelsError, GetNodesByIdChannelsByChannelIdChaincodesData, GetNodesByIdChannelsByChannelIdChaincodesResponse, GetNodesByIdChannelsByChannelIdChaincodesError, GetNodesByIdEventsData, GetNodesByIdEventsResponse, GetNodesByIdEventsError, GetNodesByIdLogsData, GetNodesByIdLogsResponse, GetNodesByIdLogsError, PostNodesByIdRestartData, PostNodesByIdRestartResponse, PostNodesByIdRestartError, PostNodesByIdStartData, PostNodesByIdStartResponse, PostNodesByIdStartError, 
PostNodesByIdStopData, PostNodesByIdStopResponse, PostNodesByIdStopError, GetNotificationsProvidersData, GetNotificationsProvidersResponse, GetNotificationsProvidersError, PostNotificationsProvidersData, PostNotificationsProvidersResponse, PostNotificationsProvidersError, DeleteNotificationsProvidersByIdData, DeleteNotificationsProvidersByIdError, GetNotificationsProvidersByIdData, GetNotificationsProvidersByIdResponse, GetNotificationsProvidersByIdError, PutNotificationsProvidersByIdData, PutNotificationsProvidersByIdResponse, PutNotificationsProvidersByIdError, PostNotificationsProvidersByIdTestData, PostNotificationsProvidersByIdTestResponse, PostNotificationsProvidersByIdTestError, GetOrganizationsData, GetOrganizationsResponse, GetOrganizationsError, PostOrganizationsData, PostOrganizationsResponse, PostOrganizationsError, GetOrganizationsByMspidByMspidData, GetOrganizationsByMspidByMspidResponse, GetOrganizationsByMspidByMspidError, DeleteOrganizationsByIdData, DeleteOrganizationsByIdError, GetOrganizationsByIdData, GetOrganizationsByIdResponse, GetOrganizationsByIdError, PutOrganizationsByIdData, PutOrganizationsByIdResponse, PutOrganizationsByIdError, GetOrganizationsByIdCrlData, GetOrganizationsByIdCrlResponse, GetOrganizationsByIdCrlError, PostOrganizationsByIdCrlRevokePemData, PostOrganizationsByIdCrlRevokePemResponse, PostOrganizationsByIdCrlRevokePemError, DeleteOrganizationsByIdCrlRevokeSerialData, DeleteOrganizationsByIdCrlRevokeSerialResponse, DeleteOrganizationsByIdCrlRevokeSerialError, PostOrganizationsByIdCrlRevokeSerialData, PostOrganizationsByIdCrlRevokeSerialResponse, PostOrganizationsByIdCrlRevokeSerialError, GetOrganizationsByIdRevokedCertificatesData, GetOrganizationsByIdRevokedCertificatesResponse, GetOrganizationsByIdRevokedCertificatesError, GetPluginsData, GetPluginsResponse, GetPluginsError, PostPluginsData, PostPluginsResponse, PostPluginsError, DeletePluginsByNameData, DeletePluginsByNameError, GetPluginsByNameData, 
GetPluginsByNameResponse, GetPluginsByNameError, PutPluginsByNameData, PutPluginsByNameResponse, PutPluginsByNameError, PostPluginsByNameDeployData, PostPluginsByNameDeployError, GetPluginsByNameDeploymentStatusData, GetPluginsByNameDeploymentStatusResponse, GetPluginsByNameDeploymentStatusError, PostPluginsByNameResumeData, PostPluginsByNameResumeResponse, PostPluginsByNameResumeError, GetPluginsByNameServicesData, GetPluginsByNameServicesResponse, GetPluginsByNameServicesError, GetPluginsByNameStatusData, GetPluginsByNameStatusResponse, GetPluginsByNameStatusError, PostPluginsByNameStopData, PostPluginsByNameStopError, GetSettingsData, GetSettingsResponse, PostSettingsData, PostSettingsResponse, GetUsersData, GetUsersResponse, GetUsersError, PostUsersData, PostUsersResponse, PostUsersError, DeleteUsersByIdData, DeleteUsersByIdError, GetUsersByIdData, GetUsersByIdResponse, GetUsersByIdError, PutUsersByIdData, PutUsersByIdResponse, PutUsersByIdError, PutUsersByIdPasswordData, PutUsersByIdPasswordResponse, PutUsersByIdPasswordError, PutUsersByIdRoleData, PutUsersByIdRoleResponse, PutUsersByIdRoleError } from './types.gen';
export const client = createClient(createConfig());
+/**
+ * Deploy a new Prometheus instance
+ * Deploys a new Prometheus instance with the specified configuration
+ */
+export const postApiV1MetricsDeploy = (options: Options) => {
+ return (options?.client ?? client).post({
+ url: '/api/v1/metrics/deploy',
+ ...options,
+ headers: {
+ 'Content-Type': 'application/json',
+ ...options?.headers
+ }
+ });
+};
+
+/**
+ * Get metrics for a specific node
+ * Retrieves metrics for a specific node by ID and optional PromQL query
+ */
+export const getApiV1MetricsNodeById = (options: Options) => {
+ return (options?.client ?? client).get({
+ url: '/api/v1/metrics/node/{id}',
+ ...options
+ });
+};
+
+/**
+ * Get label values for a specific label
+ * Retrieves all values for a specific label, optionally filtered by metric matches and node ID
+ */
+export const getApiV1MetricsNodeByIdLabelByLabelValues = (options: Options) => {
+ return (options?.client ?? client).get({
+ querySerializer: {
+ array: {
+ explode: false,
+ style: 'form'
+ }
+ },
+ url: '/api/v1/metrics/node/{id}/label/{label}/values',
+ ...options
+ });
+};
+
+/**
+ * Execute custom Prometheus query
+ * Execute a custom Prometheus query with optional time range
+ */
+export const postApiV1MetricsNodeByIdQuery = (options: Options) => {
+ return (options?.client ?? client).post({
+ url: '/api/v1/metrics/node/{id}/query',
+ ...options,
+ headers: {
+ 'Content-Type': 'application/json',
+ ...options?.headers
+ }
+ });
+};
+
+/**
+ * Get metrics for a specific node with time range
+ * Retrieves metrics for a specific node within a specified time range
+ */
+export const getApiV1MetricsNodeByIdRange = (options: Options) => {
+ return (options?.client ?? client).get({
+ url: '/api/v1/metrics/node/{id}/range',
+ ...options
+ });
+};
+
+/**
+ * Reload Prometheus configuration
+ * Triggers a reload of the Prometheus configuration to pick up any changes
+ */
+export const postApiV1MetricsReload = (options?: Options) => {
+ return (options?.client ?? client).post({
+ url: '/api/v1/metrics/reload',
+ ...options
+ });
+};
+
+/**
+ * Get Prometheus status
+ * Returns the current status of the Prometheus instance including version, port, and configuration
+ */
+export const getApiV1MetricsStatus = (options?: Options) => {
+ return (options?.client ?? client).get({
+ url: '/api/v1/metrics/status',
+ ...options
+ });
+};
+
+/**
+ * List audit logs
+ * Retrieves a paginated list of audit logs with optional filters
+ */
+export const getAuditLogs = (options?: Options) => {
+ return (options?.client ?? client).get({
+ url: '/audit/logs',
+ ...options
+ });
+};
+
+/**
+ * Get audit log
+ * Retrieves a specific audit log by ID
+ */
+export const getAuditLogsById = (options: Options) => {
+ return (options?.client ?? client).get({
+ url: '/audit/logs/{id}',
+ ...options
+ });
+};
+
/**
* Change own password
* Allows a user to change their own password
@@ -925,6 +1038,17 @@ export const getNodesByIdChannels = (optio
});
};
+/**
+ * Get committed chaincodes for a Fabric peer
+ * Retrieves all committed chaincodes for a specific channel on a Fabric peer node
+ */
+export const getNodesByIdChannelsByChannelIdChaincodes = (options: Options) => {
+ return (options?.client ?? client).get({
+ url: '/nodes/{id}/channels/{channelID}/chaincodes',
+ ...options
+ });
+};
+
/**
* Get node events
* Get a paginated list of events for a specific node
@@ -1288,6 +1412,17 @@ export const getPluginsByNameDeploymentStatus = (options: Options) => {
+ return (options?.client ?? client).post({
+ url: '/plugins/{name}/resume',
+ ...options
+ });
+};
+
/**
* Get Docker Compose services
* Get all services defined in the plugin's docker-compose configuration
diff --git a/web/src/api/client/types.gen.ts b/web/src/api/client/types.gen.ts
index c4021c7..9dea33f 100644
--- a/web/src/api/client/types.gen.ts
+++ b/web/src/api/client/types.gen.ts
@@ -1,5 +1,33 @@
// This file is auto-generated by @hey-api/openapi-ts
+export type AuditEvent = {
+ affectedResource?: string;
+ details?: {
+ [key: string]: unknown;
+ };
+ eventOutcome?: AuditEventOutcome;
+ eventSource?: string;
+ eventType?: string;
+ id?: number;
+ requestId?: string;
+ sessionId?: string;
+ severity?: AuditSeverity;
+ sourceIp?: string;
+ timestamp?: string;
+ userIdentity?: number;
+};
+
+export type AuditEventOutcome = 'SUCCESS' | 'FAILURE' | 'PENDING';
+
+export type AuditListLogsResponse = {
+ items?: Array;
+ page?: number;
+ page_size?: number;
+ total_count?: number;
+};
+
+export type AuditSeverity = 'DEBUG' | 'INFO' | 'WARNING' | 'CRITICAL';
+
export type AuthChangePasswordRequest = {
current_password: string;
new_password: string;
@@ -82,6 +110,57 @@ export type BlockTransactionWrite = {
export type BlockTxType = 'MESSAGE' | 'CONFIG' | 'CONFIG_UPDATE' | 'ENDORSER_TRANSACTION' | 'ORDERER_TRANSACTION' | 'DELIVER_SEEK_INFO' | 'CHAINCODE_PACKAGE';
+export type CommonQueryResult = {
+ data?: {
+ result?: Array<{
+ metric?: {
+ [key: string]: string;
+ };
+ /**
+ * For instant queries
+ */
+ value?: Array;
+ /**
+ * For range queries (matrix)
+ */
+ values?: Array>;
+ }>;
+ resultType?: string;
+ };
+ status?: string;
+};
+
+export type GithubComChainlaunchChainlaunchPkgMetricsCommonStatus = {
+ /**
+ * DeploymentMode is the current deployment mode
+ */
+ deployment_mode?: string;
+ /**
+ * Error is any error that occurred while getting the status
+ */
+ error?: string;
+ /**
+ * Port is the port Prometheus is listening on
+ */
+ port?: number;
+ /**
+ * ScrapeInterval is the current scrape interval
+ */
+ scrape_interval?: TimeDuration;
+ /**
+ * StartedAt is when the instance was started
+ */
+ started_at?: string;
+ /**
+ * Status is the current status of the Prometheus instance (e.g. "running", "stopped", "not_deployed")
+ */
+ status?: string;
+ /**
+ * Version is the version of Prometheus being used
+ */
+ version?: string;
+};
+
export type GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse = {
code?: number;
error?: string;
@@ -130,6 +209,9 @@ export type HandlerDeleteRevokedCertificateRequest = {
};
export type HandlerOrganizationResponse = {
+ adminSignKeyId?: number;
+ adminTlsKeyId?: number;
+ clientSignKeyId?: number;
createdAt?: string;
description?: string;
id?: number;
@@ -143,6 +225,13 @@ export type HandlerOrganizationResponse = {
updatedAt?: string;
};
+export type HandlerPaginatedOrganizationsResponse = {
+ count?: number;
+ items?: Array;
+ limit?: number;
+ offset?: number;
+};
+
export type HandlerRevokeCertificateByPemRequest = {
/**
* PEM encoded certificate
@@ -266,6 +355,15 @@ export type HttpChainInfoResponse = {
previousBlockHash?: string;
};
+export type HttpChaincodeResponse = {
+ endorsementPlugin?: string;
+ initRequired?: boolean;
+ name?: string;
+ sequence?: number;
+ validationPlugin?: string;
+ version?: string;
+};
+
export type HttpChannelConfigResponse = {
config?: {
[key: string]: unknown;
@@ -769,6 +867,8 @@ export type HttpUpdateBesuNodeRequest = {
};
externalIp?: string;
internalIp?: string;
+ metricsEnabled?: boolean;
+ metricsPort?: number;
networkId: number;
p2pHost: string;
p2pPort: number;
@@ -862,6 +962,20 @@ export type HttpUpdateProviderRequest = {
type: 'SMTP';
};
+export type MetricsCustomQueryRequest = {
+ end?: string;
+ query: string;
+ start?: string;
+ step?: string;
+};
+
+export type MetricsDeployPrometheusRequest = {
+ deployment_mode: string;
+ prometheus_port: number;
+ prometheus_version: string;
+ scrape_interval: number;
+};
+
export type ModelsCertificateRequest = {
commonName: string;
country?: Array;
@@ -1002,17 +1116,12 @@ export type PluginServiceStatus = {
};
export type ResponseErrorResponse = {
- details?: {
- [key: string]: unknown;
- };
- message?: string;
- type?: string;
+ error?: string;
};
export type ResponseResponse = {
data?: unknown;
- error?: ResponseErrorResponse;
- success?: boolean;
+ message?: string;
};
export type ServiceBesuNodeDefaults = {
@@ -1021,6 +1130,13 @@ export type ServiceBesuNodeDefaults = {
};
externalIp?: string;
internalIp?: string;
+ /**
+ * Metrics configuration
+ */
+ metricsEnabled?: boolean;
+ metricsHost?: string;
+ metricsPort?: number;
+ metricsProtocol?: string;
mode?: ServiceMode;
p2pHost?: string;
p2pPort?: number;
@@ -1034,6 +1150,13 @@ export type ServiceBesuNodeProperties = {
externalIp?: string;
internalIp?: string;
keyId?: number;
+ /**
+ * Metrics configuration
+ */
+ metricsEnabled?: boolean;
+ metricsHost?: string;
+ metricsPort?: number;
+ metricsProtocol?: string;
mode?: string;
networkId?: number;
/**
@@ -1106,35 +1229,13 @@ export type ServiceNetworkNode = {
createdAt?: string;
id?: number;
networkId?: number;
- node?: ServiceNode;
+ node?: ServiceNodeResponse;
nodeId?: number;
role?: string;
status?: string;
updatedAt?: string;
};
-export type ServiceNode = {
- createdAt?: string;
- /**
- * Node deployment configuration interface that can be one of: FabricPeerDeploymentConfig, FabricOrdererDeploymentConfig, or BesuNodeDeploymentConfig
- */
- deploymentConfig?: unknown;
- endpoint?: string;
- errorMessage?: string;
- id?: number;
- mspId?: string;
- name?: string;
- /**
- * Base interface for all node configurations
- */
- nodeConfig?: unknown;
- nodeType?: TypesNodeType;
- platform?: TypesBlockchainPlatform;
- publicEndpoint?: string;
- status?: TypesNodeStatus;
- updatedAt?: string;
-};
-
export type ServiceNodeDefaults = {
adminAddress?: string;
chaincodeAddress?: string;
@@ -1149,6 +1250,24 @@ export type ServiceNodeDefaults = {
serviceName?: string;
};
+export type ServiceNodeResponse = {
+ besuNode?: ServiceBesuNodeProperties;
+ createdAt?: string;
+ endpoint?: string;
+ errorMessage?: string;
+ fabricOrderer?: ServiceFabricOrdererProperties;
+ /**
+ * Type-specific fields
+ */
+ fabricPeer?: ServiceFabricPeerProperties;
+ id?: number;
+ name?: string;
+ nodeType?: TypesNodeType;
+ platform?: string;
+ status?: string;
+ updatedAt?: string;
+};
+
export type ServiceNodesDefaultsResult = {
availableAddresses?: Array;
orderers?: Array;
@@ -1168,6 +1287,8 @@ export type ServiceSettingConfig = {
peerTemplateCMD?: string;
};
+export type TimeDuration = -9223372036854776000 | 9223372036854776000 | 1 | 1000 | 1000000 | 1000000000 | 60000000000 | 3600000000000;
+
export type TypesAddressOverride = {
from?: string;
tlsCACert?: string;
@@ -1182,6 +1303,9 @@ export type TypesBesuNodeConfig = {
externalIp: string;
internalIp: string;
keyId: number;
+ metricsEnabled?: boolean;
+ metricsPort?: number;
+ metricsProtocol?: string;
/**
* @Description The deployment mode (service or docker)
*/
@@ -1195,6 +1319,7 @@ export type TypesBesuNodeConfig = {
* @Description The type of node (fabric-peer, fabric-orderer, besu)
*/
type?: string;
+ version?: string;
};
export type TypesBlockchainPlatform = 'FABRIC' | 'BESU';
@@ -1215,6 +1340,29 @@ export type TypesDockerCompose = {
contents?: string;
};
+export type TypesDocumentation = {
+ /**
+ * Examples contains example configurations and usage
+ */
+ examples?: Array<TypesExample>;
+ /**
+ * README contains the main documentation for the plugin
+ */
+ readme?: string;
+ /**
+ * Troubleshooting contains common issues and their solutions
+ */
+ troubleshooting?: Array<TypesTroubleshootingItem>;
+};
+
+export type TypesExample = {
+ description?: string;
+ name?: string;
+ parameters?: {
+ [key: string]: unknown;
+ };
+};
+
export type TypesFabricOrdererConfig = {
/**
* @Description Address overrides for the orderer
@@ -1314,12 +1462,15 @@ export type TypesFabricPeerConfig = {
};
export type TypesMetadata = {
+ author?: string;
+ description?: string;
+ license?: string;
name?: string;
+ repository?: string;
+ tags?: Array<string>;
version?: string;
};
-export type TypesNodeStatus = 'PENDING' | 'RUNNING' | 'STOPPED' | 'STOPPING' | 'STARTING' | 'UPDATING' | 'ERROR';
-
export type TypesNodeType = 'FABRIC_PEER' | 'FABRIC_ORDERER' | 'BESU_FULLNODE';
export type TypesOrdererAddressOverride = {
@@ -1342,6 +1493,7 @@ export type TypesParameterSpec = {
description?: string;
enum?: Array;
type?: string;
+ 'x-source'?: TypesXSourceType;
};
export type TypesService = {
@@ -1354,9 +1506,18 @@ export type TypesService = {
export type TypesSpec = {
dockerCompose?: TypesDockerCompose;
+ documentation?: TypesDocumentation;
parameters?: GithubComChainlaunchChainlaunchPkgPluginTypesParameters;
};
+export type TypesTroubleshootingItem = {
+ description?: string;
+ problem?: string;
+ solution?: string;
+};
+
+export type TypesXSourceType = 'fabric-peer' | 'key' | 'fabric-org' | 'fabric-network' | 'fabric-key';
+
export type UrlUrl = {
/**
* append a query ('?') even if RawQuery is empty
@@ -1409,6 +1570,418 @@ export type X509ExtKeyUsage = 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 |
export type X509KeyUsage = 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128 | 256;
+export type PostApiV1MetricsDeployData = {
+ /**
+ * Prometheus deployment configuration
+ */
+ body: MetricsDeployPrometheusRequest;
+ path?: never;
+ query?: never;
+ url: '/api/v1/metrics/deploy';
+};
+
+export type PostApiV1MetricsDeployErrors = {
+ /**
+ * Bad Request
+ */
+ 400: {
+ [key: string]: string;
+ };
+ /**
+ * Internal Server Error
+ */
+ 500: {
+ [key: string]: string;
+ };
+};
+
+export type PostApiV1MetricsDeployError = PostApiV1MetricsDeployErrors[keyof PostApiV1MetricsDeployErrors];
+
+export type PostApiV1MetricsDeployResponses = {
+ /**
+ * OK
+ */
+ 200: {
+ [key: string]: string;
+ };
+};
+
+export type PostApiV1MetricsDeployResponse = PostApiV1MetricsDeployResponses[keyof PostApiV1MetricsDeployResponses];
+
+export type GetApiV1MetricsNodeByIdData = {
+ body?: never;
+ path: {
+ /**
+ * Node ID
+ */
+ id: string;
+ };
+ query?: {
+ /**
+ * PromQL query to filter metrics
+ */
+ query?: string;
+ };
+ url: '/api/v1/metrics/node/{id}';
+};
+
+export type GetApiV1MetricsNodeByIdErrors = {
+ /**
+ * Bad Request
+ */
+ 400: {
+ [key: string]: string;
+ };
+ /**
+ * Internal Server Error
+ */
+ 500: {
+ [key: string]: string;
+ };
+};
+
+export type GetApiV1MetricsNodeByIdError = GetApiV1MetricsNodeByIdErrors[keyof GetApiV1MetricsNodeByIdErrors];
+
+export type GetApiV1MetricsNodeByIdResponses = {
+ /**
+ * OK
+ */
+ 200: {
+ [key: string]: unknown;
+ };
+};
+
+export type GetApiV1MetricsNodeByIdResponse = GetApiV1MetricsNodeByIdResponses[keyof GetApiV1MetricsNodeByIdResponses];
+
+export type GetApiV1MetricsNodeByIdLabelByLabelValuesData = {
+ body?: never;
+ path: {
+ /**
+ * Node ID
+ */
+ id: string;
+ /**
+ * Label name
+ */
+ label: string;
+ };
+ query?: {
+ /**
+ * Metric matches (e.g. {__name__="up"})
+ */
+ match?: Array<string>;
+ };
+ url: '/api/v1/metrics/node/{id}/label/{label}/values';
+};
+
+export type GetApiV1MetricsNodeByIdLabelByLabelValuesErrors = {
+ /**
+ * Bad request
+ */
+ 400: {
+ [key: string]: unknown;
+ };
+ /**
+ * Internal server error
+ */
+ 500: {
+ [key: string]: unknown;
+ };
+};
+
+export type GetApiV1MetricsNodeByIdLabelByLabelValuesError = GetApiV1MetricsNodeByIdLabelByLabelValuesErrors[keyof GetApiV1MetricsNodeByIdLabelByLabelValuesErrors];
+
+export type GetApiV1MetricsNodeByIdLabelByLabelValuesResponses = {
+ /**
+ * Label values
+ */
+ 200: {
+ [key: string]: unknown;
+ };
+};
+
+export type GetApiV1MetricsNodeByIdLabelByLabelValuesResponse = GetApiV1MetricsNodeByIdLabelByLabelValuesResponses[keyof GetApiV1MetricsNodeByIdLabelByLabelValuesResponses];
+
+export type PostApiV1MetricsNodeByIdQueryData = {
+ /**
+ * Query parameters
+ */
+ body: MetricsCustomQueryRequest;
+ path: {
+ /**
+ * Node ID
+ */
+ id: string;
+ };
+ query?: never;
+ url: '/api/v1/metrics/node/{id}/query';
+};
+
+export type PostApiV1MetricsNodeByIdQueryErrors = {
+ /**
+ * Bad Request
+ */
+ 400: {
+ [key: string]: string;
+ };
+ /**
+ * Internal Server Error
+ */
+ 500: {
+ [key: string]: string;
+ };
+};
+
+export type PostApiV1MetricsNodeByIdQueryError = PostApiV1MetricsNodeByIdQueryErrors[keyof PostApiV1MetricsNodeByIdQueryErrors];
+
+export type PostApiV1MetricsNodeByIdQueryResponses = {
+ /**
+ * OK
+ */
+ 200: CommonQueryResult;
+};
+
+export type PostApiV1MetricsNodeByIdQueryResponse = PostApiV1MetricsNodeByIdQueryResponses[keyof PostApiV1MetricsNodeByIdQueryResponses];
+
+export type GetApiV1MetricsNodeByIdRangeData = {
+ body?: never;
+ path: {
+ /**
+ * Node ID
+ */
+ id: string;
+ };
+ query: {
+ /**
+ * PromQL query
+ */
+ query: string;
+ /**
+ * Start time (RFC3339 format)
+ */
+ start: string;
+ /**
+ * End time (RFC3339 format)
+ */
+ end: string;
+ /**
+ * Step duration (e.g. 1m, 5m, 1h)
+ */
+ step: string;
+ };
+ url: '/api/v1/metrics/node/{id}/range';
+};
+
+export type GetApiV1MetricsNodeByIdRangeErrors = {
+ /**
+ * Bad request
+ */
+ 400: {
+ [key: string]: unknown;
+ };
+ /**
+ * Internal server error
+ */
+ 500: {
+ [key: string]: unknown;
+ };
+};
+
+export type GetApiV1MetricsNodeByIdRangeError = GetApiV1MetricsNodeByIdRangeErrors[keyof GetApiV1MetricsNodeByIdRangeErrors];
+
+export type GetApiV1MetricsNodeByIdRangeResponses = {
+ /**
+ * Metrics data
+ */
+ 200: {
+ [key: string]: unknown;
+ };
+};
+
+export type GetApiV1MetricsNodeByIdRangeResponse = GetApiV1MetricsNodeByIdRangeResponses[keyof GetApiV1MetricsNodeByIdRangeResponses];
+
+export type PostApiV1MetricsReloadData = {
+ body?: never;
+ path?: never;
+ query?: never;
+ url: '/api/v1/metrics/reload';
+};
+
+export type PostApiV1MetricsReloadErrors = {
+ /**
+ * Internal Server Error
+ */
+ 500: {
+ [key: string]: string;
+ };
+};
+
+export type PostApiV1MetricsReloadError = PostApiV1MetricsReloadErrors[keyof PostApiV1MetricsReloadErrors];
+
+export type PostApiV1MetricsReloadResponses = {
+ /**
+ * OK
+ */
+ 200: {
+ [key: string]: string;
+ };
+};
+
+export type PostApiV1MetricsReloadResponse = PostApiV1MetricsReloadResponses[keyof PostApiV1MetricsReloadResponses];
+
+export type GetApiV1MetricsStatusData = {
+ body?: never;
+ path?: never;
+ query?: never;
+ url: '/api/v1/metrics/status';
+};
+
+export type GetApiV1MetricsStatusErrors = {
+ /**
+ * Internal Server Error
+ */
+ 500: {
+ [key: string]: string;
+ };
+};
+
+export type GetApiV1MetricsStatusError = GetApiV1MetricsStatusErrors[keyof GetApiV1MetricsStatusErrors];
+
+export type GetApiV1MetricsStatusResponses = {
+ /**
+ * OK
+ */
+ 200: GithubComChainlaunchChainlaunchPkgMetricsCommonStatus;
+};
+
+export type GetApiV1MetricsStatusResponse = GetApiV1MetricsStatusResponses[keyof GetApiV1MetricsStatusResponses];
+
+export type GetAuditLogsData = {
+ body?: never;
+ path?: never;
+ query?: {
+ /**
+ * Page number (default: 1)
+ */
+ page?: number;
+ /**
+ * Page size (default: 10)
+ */
+ page_size?: number;
+ /**
+ * Start time (RFC3339 format)
+ */
+ start?: string;
+ /**
+ * End time (RFC3339 format)
+ */
+ end?: string;
+ /**
+ * Filter by event type
+ */
+ event_type?: string;
+ /**
+ * Filter by user ID
+ */
+ user_id?: string;
+ };
+ url: '/audit/logs';
+};
+
+export type GetAuditLogsErrors = {
+ /**
+ * Bad Request
+ */
+ 400: {
+ [key: string]: string;
+ };
+ /**
+ * Unauthorized
+ */
+ 401: {
+ [key: string]: string;
+ };
+ /**
+ * Forbidden
+ */
+ 403: {
+ [key: string]: string;
+ };
+ /**
+ * Internal Server Error
+ */
+ 500: {
+ [key: string]: string;
+ };
+};
+
+export type GetAuditLogsError = GetAuditLogsErrors[keyof GetAuditLogsErrors];
+
+export type GetAuditLogsResponses = {
+ /**
+ * OK
+ */
+ 200: AuditListLogsResponse;
+};
+
+export type GetAuditLogsResponse = GetAuditLogsResponses[keyof GetAuditLogsResponses];
+
+export type GetAuditLogsByIdData = {
+ body?: never;
+ path: {
+ /**
+ * Log ID
+ */
+ id: string;
+ };
+ query?: never;
+ url: '/audit/logs/{id}';
+};
+
+export type GetAuditLogsByIdErrors = {
+ /**
+ * Bad Request
+ */
+ 400: {
+ [key: string]: string;
+ };
+ /**
+ * Unauthorized
+ */
+ 401: {
+ [key: string]: string;
+ };
+ /**
+ * Forbidden
+ */
+ 403: {
+ [key: string]: string;
+ };
+ /**
+ * Not Found
+ */
+ 404: {
+ [key: string]: string;
+ };
+ /**
+ * Internal Server Error
+ */
+ 500: {
+ [key: string]: string;
+ };
+};
+
+export type GetAuditLogsByIdError = GetAuditLogsByIdErrors[keyof GetAuditLogsByIdErrors];
+
+export type GetAuditLogsByIdResponses = {
+ /**
+ * OK
+ */
+ 200: AuditEvent;
+};
+
+export type GetAuditLogsByIdResponse = GetAuditLogsByIdResponses[keyof GetAuditLogsByIdResponses];
+
export type PostAuthChangePasswordData = {
/**
* Password change request
@@ -4179,6 +4752,48 @@ export type GetNodesByIdChannelsResponses = {
export type GetNodesByIdChannelsResponse = GetNodesByIdChannelsResponses[keyof GetNodesByIdChannelsResponses];
+export type GetNodesByIdChannelsByChannelIdChaincodesData = {
+ body?: never;
+ path: {
+ /**
+ * Node ID
+ */
+ id: number;
+ /**
+ * Channel ID
+ */
+ channelID: string;
+ };
+ query?: never;
+ url: '/nodes/{id}/channels/{channelID}/chaincodes';
+};
+
+export type GetNodesByIdChannelsByChannelIdChaincodesErrors = {
+ /**
+ * Validation error
+ */
+ 400: ResponseErrorResponse;
+ /**
+ * Node not found
+ */
+ 404: ResponseErrorResponse;
+ /**
+ * Internal server error
+ */
+ 500: ResponseErrorResponse;
+};
+
+export type GetNodesByIdChannelsByChannelIdChaincodesError = GetNodesByIdChannelsByChannelIdChaincodesErrors[keyof GetNodesByIdChannelsByChannelIdChaincodesErrors];
+
+export type GetNodesByIdChannelsByChannelIdChaincodesResponses = {
+ /**
+ * OK
+ */
+ 200: Array<HttpChaincodeResponse>;
+};
+
+export type GetNodesByIdChannelsByChannelIdChaincodesResponse = GetNodesByIdChannelsByChannelIdChaincodesResponses[keyof GetNodesByIdChannelsByChannelIdChaincodesResponses];
+
export type GetNodesByIdEventsData = {
body?: never;
path: {
@@ -4599,7 +5214,16 @@ export type PostNotificationsProvidersByIdTestResponse = PostNotificationsProvid
export type GetOrganizationsData = {
body?: never;
path?: never;
- query?: never;
+ query?: {
+ /**
+ * Maximum number of organizations to return
+ */
+ limit?: number;
+ /**
+ * Number of organizations to skip
+ */
+ offset?: number;
+ };
url: '/organizations';
};
@@ -4618,7 +5242,7 @@ export type GetOrganizationsResponses = {
/**
* OK
*/
- 200: Array;
+ 200: HandlerPaginatedOrganizationsResponse;
};
export type GetOrganizationsResponse = GetOrganizationsResponses[keyof GetOrganizationsResponses];
@@ -5268,6 +5892,42 @@ export type GetPluginsByNameDeploymentStatusResponses = {
export type GetPluginsByNameDeploymentStatusResponse = GetPluginsByNameDeploymentStatusResponses[keyof GetPluginsByNameDeploymentStatusResponses];
+export type PostPluginsByNameResumeData = {
+ body?: never;
+ path: {
+ /**
+ * Plugin name
+ */
+ name: string;
+ };
+ query?: never;
+ url: '/plugins/{name}/resume';
+};
+
+export type PostPluginsByNameResumeErrors = {
+ /**
+ * Not Found
+ */
+ 404: ResponseResponse;
+ /**
+ * Internal Server Error
+ */
+ 500: ResponseResponse;
+};
+
+export type PostPluginsByNameResumeError = PostPluginsByNameResumeErrors[keyof PostPluginsByNameResumeErrors];
+
+export type PostPluginsByNameResumeResponses = {
+ /**
+ * OK
+ */
+ 200: {
+ [key: string]: string;
+ };
+};
+
+export type PostPluginsByNameResumeResponse = PostPluginsByNameResumeResponses[keyof PostPluginsByNameResumeResponses];
+
export type GetPluginsByNameServicesData = {
body?: never;
path: {
diff --git a/web/src/components/dashboard/Header.tsx b/web/src/components/dashboard/Header.tsx
index 01a2b33..6266092 100644
--- a/web/src/components/dashboard/Header.tsx
+++ b/web/src/components/dashboard/Header.tsx
@@ -4,6 +4,8 @@ import { Link } from 'react-router-dom'
import { Breadcrumb, BreadcrumbItem, BreadcrumbLink, BreadcrumbList, BreadcrumbPage, BreadcrumbSeparator } from '../ui/breadcrumb'
import { Separator } from '../ui/separator'
import { SidebarTrigger } from '../ui/sidebar'
+import { Button } from '../ui/button'
+import { MessageSquare, ExternalLink } from 'lucide-react'
export function Header() {
const { breadcrumbs } = useBreadcrumbs()
@@ -34,6 +36,13 @@ export function Header() {
diff --git a/web/src/components/dashboard/Sidebar.tsx b/web/src/components/dashboard/Sidebar.tsx
index 9b7a4c9..0853d4f 100644
--- a/web/src/components/dashboard/Sidebar.tsx
+++ b/web/src/components/dashboard/Sidebar.tsx
@@ -1,5 +1,5 @@
import { Sidebar, SidebarContent, SidebarFooter, SidebarGroup, SidebarGroupLabel, SidebarHeader, SidebarMenu, SidebarMenuButton, SidebarMenuItem, useSidebar } from '@/components/ui/sidebar'
-import { BadgeCheck, Bell, Building, ChevronsUpDown, DatabaseBackup, FileText, Globe, Key, LogOut, Network, Puzzle, Server, Settings, Share2 } from 'lucide-react'
+import { BadgeCheck, Bell, Building, ChevronsUpDown, DatabaseBackup, FileText, Globe, Key, LogOut, Network, Puzzle, Server, Settings, Share2, User, BarChart3 } from 'lucide-react'
;('use client')
// import { Project } from '@/api/client'
@@ -10,17 +10,7 @@ import { useState } from 'react'
import { Link, useLocation } from 'react-router-dom'
import logo from '../../../public/logo.svg'
import { ProBadge } from '../pro/ProBadge'
-import {
- AlertDialog,
- AlertDialogAction,
- AlertDialogCancel,
- AlertDialogContent,
- AlertDialogDescription,
- AlertDialogFooter,
- AlertDialogHeader,
- AlertDialogTitle,
- AlertDialogTrigger,
-} from '../ui/alert-dialog'
+import { AlertDialog, AlertDialogAction, AlertDialogCancel, AlertDialogContent, AlertDialogDescription, AlertDialogFooter, AlertDialogHeader, AlertDialogTitle } from '../ui/alert-dialog'
import { Avatar, AvatarFallback } from '../ui/avatar'
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuLabel, DropdownMenuSeparator, DropdownMenuTrigger } from '../ui/dropdown-menu'
@@ -42,6 +32,11 @@ const data = {
url: '/nodes',
icon: Server,
},
+ {
+ title: 'Analytics',
+ url: '/platform/analytics',
+ icon: BarChart3,
+ },
{
title: 'Plugins',
url: '/plugins',
@@ -96,6 +91,12 @@ const data = {
icon: Settings,
roles: ['admin', 'manager'],
},
+ {
+ title: 'Audit Logs',
+ url: '/settings/audit-logs',
+ icon: FileText,
+ roles: ['admin'],
+ },
],
},
{
@@ -250,11 +251,11 @@ function NavUser() {
-
+