diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 3b72633..bedd360 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -11,6 +11,10 @@ on: - '**/*.md' - 'docs/**' +# Set explicit permissions following the principle of least privilege +permissions: + contents: read # Only need read access to the repository contents + jobs: build-and-test: name: Build and Test on Linux diff --git a/.gitignore b/.gitignore index 16c1a31..51749ba 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,7 @@ chaindeploy-api-go chainlaunch certs .cursorrules -.env \ No newline at end of file +.env +test-instance +*.db +decode_extradata \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json index d8895c5..5da6b16 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -4,6 +4,23 @@ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ + { + "name": "Node Custom", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}", + "args": [ + "serve", + "--data=test-instance", + "--db=test.db" + ], + "env": { + "CHAINLAUNCH_USER": "admin", + "CHAINLAUNCH_PASSWORD": "admin123", + "JAVA_HOME": "/opt/homebrew/opt/openjdk@21" + } + }, { "name": "Node 1", "type": "go", @@ -13,13 +30,12 @@ "args": [ "serve", "--port=8100", - "--dev=false", "--db=./data/chainlaunch.db", ], "env": { "CHAINLAUNCH_USER": "admin", "CHAINLAUNCH_PASSWORD": "admin", - "JAVA_HOME": "/opt/homebrew/Cellar/openjdk/23.0.2" + "JAVA_HOME": "/opt/homebrew/opt/openjdk@21" } }, { @@ -36,7 +52,7 @@ "env": { "CHAINLAUNCH_USER": "admin", "CHAINLAUNCH_PASSWORD": "admin", - "JAVA_HOME": "/opt/homebrew/Cellar/openjdk/23.0.2" + "JAVA_HOME": "/opt/homebrew/opt/openjdk@21" } } ], diff --git a/cmd/fabric/fabric.go b/cmd/fabric/fabric.go index ecf5785..9d9b9e2 100644 --- a/cmd/fabric/fabric.go +++ b/cmd/fabric/fabric.go @@ -21,7 +21,7 @@ func NewFabricCmd(logger 
*logger.Logger) *cobra.Command { rootCmd.AddCommand( install.NewInstallCmd(logger), create.NewCreateCmd(logger), - query.NewQueryChaincodeCMD(os.Stdout, os.Stderr), + query.NewQueryChaincodeCMD(os.Stdout, os.Stderr, logger), invoke.NewInvokeChaincodeCMD(os.Stdout, os.Stderr, logger), nc.NewNCCmd(logger), ) diff --git a/cmd/fabric/install/install.go b/cmd/fabric/install/install.go index 371a384..5cfe5b2 100644 --- a/cmd/fabric/install/install.go +++ b/cmd/fabric/install/install.go @@ -4,6 +4,7 @@ import ( "archive/tar" "bytes" "compress/gzip" + "context" "encoding/json" "fmt" "io/ioutil" @@ -11,19 +12,20 @@ import ( "path/filepath" "github.com/chainlaunch/chainlaunch/pkg/logger" + "github.com/golang/protobuf/proto" "github.com/pkg/errors" + "google.golang.org/grpc" "io" "strings" - "time" - - pb "github.com/hyperledger/fabric-protos-go/peer" - "github.com/hyperledger/fabric-sdk-go/pkg/client/resmgmt" - "github.com/hyperledger/fabric-sdk-go/pkg/common/providers/fab" - "github.com/hyperledger/fabric-sdk-go/pkg/core/config" - "github.com/hyperledger/fabric-sdk-go/pkg/fab/ccpackager/lifecycle" - "github.com/hyperledger/fabric-sdk-go/pkg/fabsdk" - "github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/common/policydsl" + + "github.com/chainlaunch/chainlaunch/pkg/fabric/networkconfig" + "github.com/chainlaunch/chainlaunch/pkg/fabric/policydsl" + "github.com/hyperledger/fabric-admin-sdk/pkg/chaincode" + "github.com/hyperledger/fabric-admin-sdk/pkg/identity" + "github.com/hyperledger/fabric-admin-sdk/pkg/network" + gwidentity "github.com/hyperledger/fabric-gateway/pkg/identity" + pb "github.com/hyperledger/fabric-protos-go-apiv2/peer" "github.com/spf13/cobra" ) @@ -45,9 +47,54 @@ type installCmd struct { logger *logger.Logger } +func (c *installCmd) getPeerAndIdentityForOrg(nc *networkconfig.NetworkConfig, org string, peerID string, userID string) (*grpc.ClientConn, identity.SigningIdentity, error) { + peerConfig, ok := nc.Peers[peerID] + if !ok { + 
return nil, nil, fmt.Errorf("peer %s not found in network config", peerID) + } + conn, err := c.getPeerConnection(peerConfig.URL, peerConfig.TLSCACerts.PEM) + if err != nil { + return nil, nil, err + } + orgConfig, ok := nc.Organizations[org] + if !ok { + return nil, nil, fmt.Errorf("organization %s not found in network config", org) + } + user, ok := orgConfig.Users[userID] + if !ok { + return nil, nil, fmt.Errorf("user %s not found in network config", userID) + } + userCert, err := gwidentity.CertificateFromPEM([]byte(user.Cert.PEM)) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to read user certificate for user %s and org %s", userID, org) + } + userPrivateKey, err := gwidentity.PrivateKeyFromPEM([]byte(user.Key.PEM)) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to read user private key for user %s and org %s", userID, org) + } + userIdentity, err := identity.NewPrivateKeySigningIdentity(org, userCert, userPrivateKey) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to create user identity for user %s and org %s", userID, org) + } + return conn, userIdentity, nil +} + +func (c *installCmd) getPeerConnection(address string, tlsCACert string) (*grpc.ClientConn, error) { + networkNode := network.Node{ + Addr: strings.Replace(address, "grpcs://", "", 1), + TLSCACertByte: []byte(tlsCACert), + } + conn, err := network.DialConnection(networkNode) + if err != nil { + return nil, fmt.Errorf("failed to dial connection: %w", err) + } + return conn, nil + +} + func (c installCmd) start() error { var chaincodeEndpoint string - + ctx := context.Background() if c.local { // Use local chaincode address directly chaincodeEndpoint = c.chaincodeAddress @@ -71,50 +118,89 @@ func (c installCmd) start() error { return err } _ = pkg - packageID := lifecycle.ComputePackageID(label, pkg) + packageID := chaincode.GetPackageID(label, pkg) c.logger.Infof("packageID: %s", packageID) - - // install chaincode in peers - configBackend := 
config.FromFile(c.networkConfig) - - clientsMap := map[string]*resmgmt.Client{} - sdk, err := fabsdk.New(configBackend) + nc, err := networkconfig.LoadFromFile(c.networkConfig) if err != nil { return err } - for idx, mspID := range c.organizations { - clientContext := sdk.Context(fabsdk.WithUser(c.users[idx]), fabsdk.WithOrg(mspID)) - clientsMap[mspID], err = resmgmt.New(clientContext) - if err != nil { - return err + + // // install chaincode in peers + // configBackend := config.FromFile(c.networkConfig) + + // clientsMap := map[string]*resmgmt.Client{} + // sdk, err := fabsdk.New(configBackend) + // if err != nil { + // return err + // } + // for idx, mspID := range c.organizations { + // clientContext := sdk.Context(fabsdk.WithUser(c.users[idx]), fabsdk.WithOrg(mspID)) + // clientsMap[mspID], err = resmgmt.New(clientContext) + // if err != nil { + // return err + // } + // } + for idx, org := range c.organizations { + orgConfig, ok := nc.Organizations[org] + if !ok { + return fmt.Errorf("organization %s not found in network config", org) } - } - for mspID, resmgmtClient := range clientsMap { - _, err = resmgmtClient.LifecycleInstallCC( - resmgmt.LifecycleInstallCCRequest{ - Label: label, - Package: pkg, - }, - resmgmt.WithTimeout(fab.ResMgmt, 20*time.Minute), - resmgmt.WithTimeout(fab.PeerResponse, 20*time.Minute), - ) - if err != nil { - return err + for _, peerID := range orgConfig.Peers { + peerConfig, ok := nc.Peers[peerID] + if !ok { + return fmt.Errorf("peer %s not found in network config", peerID) + } + conn, userIdentity, err := c.getPeerAndIdentityForOrg(nc, org, peerID, c.users[idx]) + if err != nil { + return err + } + defer conn.Close() + peerClient := chaincode.NewPeer(conn, userIdentity) + result, err := peerClient.Install(ctx, bytes.NewReader(pkg)) + if err != nil && !strings.Contains(err.Error(), "chaincode already successfully installed") { + return errors.Wrapf(err, "failed to install chaincode for user %s and org %s", c.users[idx], org) + } + 
if result != nil { + c.logger.Infof("Chaincode installed %s in %s", result.PackageId, peerConfig.URL) + } else { + c.logger.Infof("Chaincode already installed in %s", peerConfig.URL) + } } - c.logger.Infof("Chaincode installed in %s", mspID) } - sp, err := policydsl.FromString(c.signaturePolicy) + + // sp, err := policydsl.FromString(c.signaturePolicy) + // if err != nil { + // return err + // } + applicationPolicy, err := chaincode.NewApplicationPolicy(c.signaturePolicy, "") if err != nil { return err } version := "1" sequence := 1 - resmgmtClient := clientsMap[c.organizations[0]] - committedCCs, err := resmgmtClient.LifecycleQueryCommittedCC( + allOrgGateways := []*chaincode.Gateway{} + for idx, org := range c.organizations { + orgConfig, ok := nc.Organizations[org] + if !ok { + return fmt.Errorf("organization %s not found in network config", org) + } + if len(orgConfig.Peers) == 0 { + return fmt.Errorf("organization %s has no peers", org) + } + conn, userIdentity, err := c.getPeerAndIdentityForOrg(nc, org, orgConfig.Peers[0], c.users[idx]) + if err != nil { + return err + } + defer conn.Close() + gateway := chaincode.NewGateway(conn, userIdentity) + allOrgGateways = append(allOrgGateways, gateway) + } + firstGateway := allOrgGateways[0] + committedCC, err := firstGateway.QueryCommittedWithName( + ctx, c.channel, - resmgmt.LifecycleQueryCommittedCCRequest{Name: c.chaincode}, - resmgmt.WithTargetFilter(&multipleMSPFilter{mspIDs: c.organizations}), + c.chaincode, ) if err != nil { c.logger.Warnf("Error when getting commited chaincodes: %v", err) @@ -132,12 +218,22 @@ func (c installCmd) start() error { return err } } - c.logger.Infof("Commited CCs=%d", len(committedCCs)) - shouldCommit := len(committedCCs) == 0 - if len(committedCCs) > 0 { - firstCommittedCC := committedCCs[0] - signaturePolicyString := firstCommittedCC.SignaturePolicy.String() - newSignaturePolicyString := sp.String() + c.logger.Infof("Commited CC=%v", committedCC) + shouldCommit := committedCC == 
nil + if committedCC != nil { + appPolicy := pb.ApplicationPolicy{} + err = proto.Unmarshal(committedCC.GetValidationParameter(), &appPolicy) + if err != nil { + return err + } + var signaturePolicyString string + switch policy := appPolicy.Type.(type) { + case *pb.ApplicationPolicy_SignaturePolicy: + signaturePolicyString = policy.SignaturePolicy.String() + default: + return errors.Errorf("unsupported policy type %T", policy) + } + newSignaturePolicyString := applicationPolicy.String() if signaturePolicyString != newSignaturePolicyString { c.logger.Infof("Signature policy changed, old=%s new=%s", signaturePolicyString, newSignaturePolicyString) shouldCommit = true @@ -145,7 +241,7 @@ func (c installCmd) start() error { c.logger.Infof("Signature policy not changed, signaturePolicy=%s", signaturePolicyString) } // compare collections - oldCollections := firstCommittedCC.CollectionConfig + oldCollections := committedCC.GetCollections().GetConfig() newCollections := collections if len(oldCollections) != len(newCollections) { c.logger.Infof("Collection config changed, old=%d new=%d", len(oldCollections), len(newCollections)) @@ -170,68 +266,68 @@ func (c installCmd) start() error { } } } - if len(committedCCs) > 0 { + if committedCC != nil { if shouldCommit { - version = committedCCs[len(committedCCs)-1].Version - sequence = int(committedCCs[len(committedCCs)-1].Sequence) + 1 + version = committedCC.GetVersion() + sequence = int(committedCC.GetSequence()) + 1 } else { - version = committedCCs[len(committedCCs)-1].Version - sequence = int(committedCCs[len(committedCCs)-1].Sequence) + version = committedCC.GetVersion() + sequence = int(committedCC.GetSequence()) } c.logger.Infof("Chaincode already committed, version=%s sequence=%d", version, sequence) } c.logger.Infof("Should commit=%v", shouldCommit) - // approve chaincode in orgs - approveCCRequest := resmgmt.LifecycleApproveCCRequest{ - Name: label, - Version: version, + // // approve chaincode in orgs + // 
approveCCRequest := resmgmt.LifecycleApproveCCRequest{ + // Name: label, + // Version: version, + // PackageID: packageID, + // Sequence: int64(sequence), + // CollectionConfig: collections, + // EndorsementPlugin: "escc", + // ValidationPlugin: "vscc", + // SignaturePolicy: sp, + // InitRequired: false, + // } + + chaincodeDef := &chaincode.Definition{ + ChannelName: c.channel, PackageID: packageID, - Sequence: int64(sequence), - CollectionConfig: collections, + Name: c.chaincode, + Version: version, EndorsementPlugin: "escc", ValidationPlugin: "vscc", - SignaturePolicy: sp, + Sequence: int64(sequence), + ApplicationPolicy: applicationPolicy, InitRequired: false, + Collections: nil, } - for mspID, resmgmtClient := range clientsMap { - - txID, err := resmgmtClient.LifecycleApproveCC( - c.channel, - approveCCRequest, - resmgmt.WithTargetFilter(&mspFilter{mspID: mspID}), - resmgmt.WithTimeout(fab.ResMgmt, 20*time.Minute), - resmgmt.WithTimeout(fab.PeerResponse, 20*time.Minute), - ) + for idx, gateway := range allOrgGateways { + err := gateway.Approve(ctx, chaincodeDef) + if err != nil { + c.logger.Errorf("Error when approving chaincode: %v", err) + return err + } if err != nil && !strings.Contains(err.Error(), "redefine uncommitted") { c.logger.Errorf("Error when approving chaincode: %v", err) return err } - c.logger.Infof("Chaincode approved, org=%s tx=%s", mspID, txID) + c.logger.Infof("Chaincode approved, org=%s", c.organizations[idx]) } if shouldCommit { + // commit chaincode in orgs - txID, err := resmgmtClient.LifecycleCommitCC( - c.channel, - resmgmt.LifecycleCommitCCRequest{ - Name: label, - Version: version, - Sequence: int64(sequence), - CollectionConfig: collections, - EndorsementPlugin: "escc", - ValidationPlugin: "vscc", - SignaturePolicy: sp, - InitRequired: false, - }, - resmgmt.WithTimeout(fab.ResMgmt, 2*time.Minute), - resmgmt.WithTimeout(fab.PeerResponse, 2*time.Minute), - resmgmt.WithTargetFilter(&multipleMSPFilter{mspIDs: c.organizations}), + err 
:= firstGateway.Commit( + ctx, + chaincodeDef, ) if err != nil { + c.logger.Errorf("Error when committing chaincode: %v", err) return err } - c.logger.Infof("Chaincode committed, tx=%s", txID) + c.logger.Infof("Chaincode committed") + } - sdk.Close() if c.envFile != "" { err = os.WriteFile(c.envFile, []byte(fmt.Sprintf(` @@ -247,30 +343,6 @@ CORE_PEER_TLS_ENABLED=false return nil } -type multipleMSPFilter struct { - mspIDs []string -} - -// Accept returns true if this peer is to be included in the target list -func (f *multipleMSPFilter) Accept(peer fab.Peer) bool { - // check if its of one of the mspIDs - for _, mspID := range f.mspIDs { - if peer.MSPID() == mspID { - return true - } - } - return false -} - -type mspFilter struct { - mspID string -} - -// Accept returns true if this peer is to be included in the target list -func (f *mspFilter) Accept(peer fab.Peer) bool { - return peer.MSPID() == f.mspID -} - func (c *installCmd) getChaincodePackage(label string, codeTarGz []byte) ([]byte, error) { var err error metadataJson := fmt.Sprintf(` diff --git a/cmd/fabric/invoke/invoke.go b/cmd/fabric/invoke/invoke.go index 4ac3ebb..cb26a35 100644 --- a/cmd/fabric/invoke/invoke.go +++ b/cmd/fabric/invoke/invoke.go @@ -3,12 +3,17 @@ package invoke import ( "fmt" "io" + "math/rand/v2" + "strings" + "github.com/chainlaunch/chainlaunch/pkg/fabric/networkconfig" "github.com/chainlaunch/chainlaunch/pkg/logger" - "github.com/hyperledger/fabric-sdk-go/pkg/client/channel" - "github.com/hyperledger/fabric-sdk-go/pkg/core/config" - "github.com/hyperledger/fabric-sdk-go/pkg/fabsdk" + "github.com/hyperledger/fabric-admin-sdk/pkg/network" + "github.com/hyperledger/fabric-gateway/pkg/client" + "github.com/hyperledger/fabric-gateway/pkg/identity" + "github.com/pkg/errors" "github.com/spf13/cobra" + "google.golang.org/grpc" ) type invokeChaincodeCmd struct { @@ -25,44 +30,124 @@ type invokeChaincodeCmd struct { func (c *invokeChaincodeCmd) validate() error { return nil } + +func (c 
*invokeChaincodeCmd) getPeerAndIdentityForOrg(nc *networkconfig.NetworkConfig, org string, peerID string, userID string) (*grpc.ClientConn, identity.Sign, *identity.X509Identity, error) { + peerConfig, ok := nc.Peers[peerID] + if !ok { + return nil, nil, nil, fmt.Errorf("peer %s not found in network config", peerID) + } + conn, err := c.getPeerConnection(peerConfig.URL, peerConfig.TLSCACerts.PEM) + if err != nil { + return nil, nil, nil, err + } + orgConfig, ok := nc.Organizations[org] + if !ok { + return nil, nil, nil, fmt.Errorf("organization %s not found in network config", org) + } + user, ok := orgConfig.Users[userID] + if !ok { + return nil, nil, nil, fmt.Errorf("user %s not found in network config", userID) + } + userCert, err := identity.CertificateFromPEM([]byte(user.Cert.PEM)) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to read user certificate for user %s and org %s", userID, org) + } + userPrivateKey, err := identity.PrivateKeyFromPEM([]byte(user.Key.PEM)) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to read user private key for user %s and org %s", userID, org) + } + userPK, err := identity.NewPrivateKeySign(userPrivateKey) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to create user identity for user %s and org %s", userID, org) + } + userIdentity, err := identity.NewX509Identity(c.mspID, userCert) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to create user identity for user %s and org %s", userID, org) + } + return conn, userPK, userIdentity, nil +} + +func (c *invokeChaincodeCmd) getPeerConnection(address string, tlsCACert string) (*grpc.ClientConn, error) { + + networkNode := network.Node{ + Addr: strings.Replace(address, "grpcs://", "", 1), + TLSCACertByte: []byte(tlsCACert), + } + conn, err := network.DialConnection(networkNode) + if err != nil { + return nil, fmt.Errorf("failed to dial connection: %w", err) + } + return conn, nil + +} + func (c 
*invokeChaincodeCmd) run(out io.Writer) error { - configBackend := config.FromFile(c.configPath) - sdk, err := fabsdk.New(configBackend) + networkConfig, err := networkconfig.LoadFromFile(c.configPath) if err != nil { return err } - chContext := sdk.ChannelContext( - c.channel, - fabsdk.WithUser(c.userName), - fabsdk.WithOrg(c.mspID), - ) - ch, err := channel.New(chContext) + + orgConfig, ok := networkConfig.Organizations[c.mspID] + if !ok { + return fmt.Errorf("organization %s not found", c.mspID) + } + _, ok = orgConfig.Users[c.userName] + if !ok { + return fmt.Errorf("user %s not found", c.userName) + } + peers := orgConfig.Peers + if len(peers) == 0 { + return fmt.Errorf("no peers found for organization %s", c.mspID) + } + // Get a random peer from the organization's peers + // If no specific peer ID is provided, select a random one + // Generate a random index + randomIndex := rand.Int() % len(peers) + + peerID := peers[randomIndex] + c.logger.Infof("Randomly selected peer: %s", peerID) + + conn, userPK, userIdentity, err := c.getPeerAndIdentityForOrg(networkConfig, c.mspID, peerID, c.userName) if err != nil { return err } - var args [][]byte + defer conn.Close() + gateway, err := client.Connect(userIdentity, client.WithSign(userPK), client.WithClientConnection(conn)) + if err != nil { + return err + } + defer gateway.Close() + network := gateway.GetNetwork(c.channel) + contract := network.GetContract(c.chaincode) + args := [][]byte{} for _, arg := range c.args { args = append(args, []byte(arg)) } - response, err := ch.Execute( - channel.Request{ - ChaincodeID: c.chaincode, - Fcn: c.fcn, - Args: args, - TransientMap: nil, - InvocationChain: nil, - IsInit: false, - }, - ) + + response, err := contract.NewProposal(c.fcn, client.WithBytesArguments(args...)) if err != nil { - return err + return errors.Wrapf(err, "failed to create proposal") + } + endorseResponse, err := response.Endorse() + if err != nil { + return errors.Wrapf(err, "failed to endorse proposal") 
+ } + submitResponse, err := endorseResponse.Submit() + if err != nil { + return errors.Wrapf(err, "failed to submit proposal") } - _, err = fmt.Fprint(out, string(response.Payload)) + responseBytes, err := submitResponse.Bytes() + if err != nil { + return errors.Wrapf(err, "failed to get response bytes") + } + + _, err = fmt.Fprint(out, string(responseBytes)) if err != nil { return err } - c.logger.Infof("txid=%s", response.TransactionID) + c.logger.Infof("txid=%s", submitResponse.TransactionID()) return nil + } func NewInvokeChaincodeCMD(out io.Writer, errOut io.Writer, logger *logger.Logger) *cobra.Command { diff --git a/cmd/fabric/query/query.go b/cmd/fabric/query/query.go index faa399c..20a405c 100644 --- a/cmd/fabric/query/query.go +++ b/cmd/fabric/query/query.go @@ -2,11 +2,18 @@ package query import ( "fmt" - "github.com/hyperledger/fabric-sdk-go/pkg/client/channel" - "github.com/hyperledger/fabric-sdk-go/pkg/core/config" - "github.com/hyperledger/fabric-sdk-go/pkg/fabsdk" - "github.com/spf13/cobra" "io" + "math/rand/v2" + "strings" + + "github.com/chainlaunch/chainlaunch/pkg/fabric/networkconfig" + "github.com/chainlaunch/chainlaunch/pkg/logger" + "github.com/hyperledger/fabric-admin-sdk/pkg/network" + "github.com/hyperledger/fabric-gateway/pkg/client" + "github.com/hyperledger/fabric-gateway/pkg/identity" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "google.golang.org/grpc" ) type queryChaincodeCmd struct { @@ -17,52 +24,116 @@ type queryChaincodeCmd struct { chaincode string fcn string args []string + logger *logger.Logger } func (c *queryChaincodeCmd) validate() error { return nil } + +func (c *queryChaincodeCmd) getPeerAndIdentityForOrg(nc *networkconfig.NetworkConfig, org string, peerID string, userID string) (*grpc.ClientConn, identity.Sign, *identity.X509Identity, error) { + peerConfig, ok := nc.Peers[peerID] + if !ok { + return nil, nil, nil, fmt.Errorf("peer %s not found in network config", peerID) + } + conn, err := 
c.getPeerConnection(peerConfig.URL, peerConfig.TLSCACerts.PEM) + if err != nil { + return nil, nil, nil, err + } + orgConfig, ok := nc.Organizations[org] + if !ok { + return nil, nil, nil, fmt.Errorf("organization %s not found in network config", org) + } + user, ok := orgConfig.Users[userID] + if !ok { + return nil, nil, nil, fmt.Errorf("user %s not found in network config", userID) + } + userCert, err := identity.CertificateFromPEM([]byte(user.Cert.PEM)) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to read user certificate for user %s and org %s", userID, org) + } + userPrivateKey, err := identity.PrivateKeyFromPEM([]byte(user.Key.PEM)) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to read user private key for user %s and org %s", userID, org) + } + userPK, err := identity.NewPrivateKeySign(userPrivateKey) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to create user identity for user %s and org %s", userID, org) + } + userIdentity, err := identity.NewX509Identity(c.mspID, userCert) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to create user identity for user %s and org %s", userID, org) + } + return conn, userPK, userIdentity, nil +} + +func (c *queryChaincodeCmd) getPeerConnection(address string, tlsCACert string) (*grpc.ClientConn, error) { + + networkNode := network.Node{ + Addr: strings.Replace(address, "grpcs://", "", 1), + TLSCACertByte: []byte(tlsCACert), + } + conn, err := network.DialConnection(networkNode) + if err != nil { + return nil, fmt.Errorf("failed to dial connection: %w", err) + } + return conn, nil +} + func (c *queryChaincodeCmd) run(out io.Writer) error { - configBackend := config.FromFile(c.configPath) - sdk, err := fabsdk.New(configBackend) + networkConfig, err := networkconfig.LoadFromFile(c.configPath) if err != nil { return err } - chContext := sdk.ChannelContext( - c.channel, - fabsdk.WithUser(c.userName), - fabsdk.WithOrg(c.mspID), - ) - ch, err 
:= channel.New(chContext) + + orgConfig, ok := networkConfig.Organizations[c.mspID] + if !ok { + return fmt.Errorf("organization %s not found", c.mspID) + } + _, ok = orgConfig.Users[c.userName] + if !ok { + return fmt.Errorf("user %s not found", c.userName) + } + peers := orgConfig.Peers + if len(peers) == 0 { + return fmt.Errorf("no peers found for organization %s", c.mspID) + } + + randomIndex := rand.Int() % len(peers) + peerID := peers[randomIndex] + c.logger.Infof("Randomly selected peer: %s", peerID) + + conn, userPK, userIdentity, err := c.getPeerAndIdentityForOrg(networkConfig, c.mspID, peerID, c.userName) if err != nil { return err } - var args [][]byte - for _, arg := range c.args { - args = append(args, []byte(arg)) - } - response, err := ch.Query( - channel.Request{ - ChaincodeID: c.chaincode, - Fcn: c.fcn, - Args: args, - TransientMap: nil, - InvocationChain: nil, - IsInit: false, - }, - ) + defer conn.Close() + + gateway, err := client.Connect(userIdentity, client.WithSign(userPK), client.WithClientConnection(conn)) if err != nil { return err } - _, err = fmt.Fprint(out, string(response.Payload)) + defer gateway.Close() + + network := gateway.GetNetwork(c.channel) + contract := network.GetContract(c.chaincode) + + result, err := contract.EvaluateTransaction(c.fcn, c.args...) 
+ if err != nil { + return errors.Wrapf(err, "failed to evaluate transaction") + } + + _, err = fmt.Fprint(out, string(result)) if err != nil { return err } return nil } -func NewQueryChaincodeCMD(out io.Writer, errOut io.Writer) *cobra.Command { - c := &queryChaincodeCmd{} +func NewQueryChaincodeCMD(out io.Writer, errOut io.Writer, logger *logger.Logger) *cobra.Command { + c := &queryChaincodeCmd{ + logger: logger, + } cmd := &cobra.Command{ Use: "query", RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/serve/serve.go b/cmd/serve/serve.go index 41cb99c..9db8412 100644 --- a/cmd/serve/serve.go +++ b/cmd/serve/serve.go @@ -20,6 +20,7 @@ import ( "github.com/chainlaunch/chainlaunch/pkg/auth" backuphttp "github.com/chainlaunch/chainlaunch/pkg/backups/http" backupservice "github.com/chainlaunch/chainlaunch/pkg/backups/service" + configservice "github.com/chainlaunch/chainlaunch/pkg/config" "github.com/chainlaunch/chainlaunch/pkg/db" fabrichandler "github.com/chainlaunch/chainlaunch/pkg/fabric/handler" fabricservice "github.com/chainlaunch/chainlaunch/pkg/fabric/service" @@ -35,6 +36,8 @@ import ( nodesservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service" notificationhttp "github.com/chainlaunch/chainlaunch/pkg/notifications/http" notificationservice "github.com/chainlaunch/chainlaunch/pkg/notifications/service" + settingshttp "github.com/chainlaunch/chainlaunch/pkg/settings/http" + settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service" "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" "github.com/go-chi/cors" @@ -47,15 +50,17 @@ import ( httpSwagger "github.com/swaggo/http-swagger" ) -var ( - port int - dbPath string - queries *db.Queries - dev bool - // HTTP TLS configuration variables - tlsCertFile string - tlsKeyFile string -) +// var ( +// port int +// dbPath string +// queries *db.Queries +// dev bool +// // HTTP TLS configuration variables +// tlsCertFile string +// tlsKeyFile string + +// dataPath 
string +// ) // spaHandler implements the http.Handler interface for serving a Single Page Application type spaHandler struct { @@ -109,8 +114,8 @@ func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // @termsOfService http://swagger.io/terms/ // @contact.name API Support -// @contact.url http://www.chainlaunch.com/support -// @contact.email support@chainlaunch.com +// @contact.url http://chainlaunch.dev/support +// @contact.email support@chainlaunch.dev // @license.name Apache 2.0 // @license.url http://www.apache.org/licenses/LICENSE-2.0.html @@ -133,9 +138,6 @@ func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // @tag.name Providers // @tag.description Key provider management operations -// @tag.name Networks -// @tag.description Blockchain network management operations - // @tag.name Nodes // @tag.description Network node management operations @@ -144,7 +146,6 @@ const ( keyLength = 32 // 256 bits encryptionKeyFile = "encryption_key" sessionKeyFile = "session_key" - configDirName = ".chainlaunch" ) // Add these new functions @@ -159,16 +160,21 @@ func generateRandomKey(length int) ([]byte, error) { return key, nil } -func getConfigDir() (string, error) { - // First check XDG_CONFIG_HOME +func getConfigDir(dataPath string) (string, error) { + // If dataPath is provided, use it directly + if dataPath != "" { + return dataPath, nil + } + + // Fallback to XDG_CONFIG_HOME if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" { return filepath.Join(configHome, "chainlaunch"), nil } - // Then check HOME + // Then fallback to HOME home := os.Getenv("HOME") if home == "" { - // Fallback to user home dir + // Final fallback to user home dir var err error home, err = os.UserHomeDir() if err != nil { @@ -176,18 +182,18 @@ func getConfigDir() (string, error) { } } - // For Linux/Mac: ~/.chainlaunch - return filepath.Join(home, configDirName), nil + // Default fallback: ~/.chainlaunch + return filepath.Join(home, 
"chainlaunch"), nil } -func ensureKeyExists(filename string) (string, error) { +func ensureKeyExists(filename string, dataPath string) (string, error) { // First check if the key is already set in environment envKey := strings.ToUpper(strings.TrimSuffix(filename, "_key")) if key := os.Getenv(envKey); key != "" { return key, nil } - configDir, err := getConfigDir() + configDir, err := getConfigDir(dataPath) if err != nil { return "", err } @@ -252,7 +258,7 @@ func formatDuration(d time.Duration) string { } // setupServer configures and returns the HTTP server -func setupServer(queries *db.Queries, authService *auth.AuthService, views embed.FS) *chi.Mux { +func setupServer(queries *db.Queries, authService *auth.AuthService, views embed.FS, dev bool, dbPath string, dataPath string) *chi.Mux { // Initialize services keyManagementService, err := service.NewKeyManagementService(queries) if err != nil { @@ -261,12 +267,19 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed if err := keyManagementService.InitializeKeyProviders(context.Background()); err != nil { log.Fatal("Failed to initialize key providers:", err) } - - organizationService := fabricservice.NewOrganizationService(queries, keyManagementService) + configService := configservice.NewConfigService(dataPath) + organizationService := fabricservice.NewOrganizationService(queries, keyManagementService, configService) logger := logger.NewDefault() nodeEventService := nodesservice.NewNodeEventService(queries, logger) - nodesService := nodesservice.NewNodeService(queries, logger, keyManagementService, organizationService, nodeEventService) + settingsService := settingsservice.NewSettingsService(queries, logger) + _, err = settingsService.InitializeDefaultSettings(context.Background()) + if err != nil { + log.Fatalf("Failed to initialize default settings: %v", err) + } + settingsHandler := settingshttp.NewHandler(settingsService, logger) + + nodesService := 
nodesservice.NewNodeService(queries, logger, keyManagementService, organizationService, nodeEventService, configService, settingsService) networksService := networksservice.NewNetworkService(queries, nodesService, keyManagementService, logger, organizationService) notificationService := notificationservice.NewNotificationService(queries, logger) backupService := backupservice.NewBackupService(queries, logger, notificationService, dbPath) @@ -278,7 +291,7 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed DefaultFailureThreshold: 3, // Alert after 3 consecutive failures Workers: 3, // Use 3 worker goroutines } - monitoringService := monitoring.NewService(monitoringConfig, notificationService) + monitoringService := monitoring.NewService(logger, monitoringConfig, notificationService, nodesService) // Start the monitoring service with a background context monitoringCtx, monitoringCancel := context.WithCancel(context.Background()) @@ -399,9 +412,16 @@ func setupServer(queries *db.Queries, authService *auth.AuthService, views embed backupHandler.RegisterRoutes(r) // Mount notifications routes notificationHandler.RegisterRoutes(r) + // Mount settings routes + settingsHandler.RegisterRoutes(r) }) }) - + r.Get("/api/swagger/*", httpSwagger.Handler( + httpSwagger.URL("/api/swagger/doc.json"), + httpSwagger.DeepLinking(true), + httpSwagger.DocExpansion("none"), + httpSwagger.DomID("swagger-ui"), + )) // Swagger documentation r.Get("/swagger/*", httpSwagger.Handler( httpSwagger.URL("/swagger/doc.json"), @@ -449,140 +469,229 @@ func runMigrations(database *sql.DB, migrationsFS embed.FS) error { return nil } -// Command returns the serve command -func Command(configCMD config.ConfigCMD, logger *logger.Logger) *cobra.Command { - serveCmd := &cobra.Command{ - Use: "serve", - Short: "Start the API server", - Long: `Start the HTTP API server on the specified port. 
-For example: - chainlaunch serve --port 8100`, - PreRun: func(cmd *cobra.Command, args []string) { - // Ensure the database directory exists - dbDir := filepath.Dir(dbPath) - if err := os.MkdirAll(dbDir, 0755); err != nil { - log.Fatalf("Failed to create database directory: %v", err) - } +type serveCmd struct { + logger *logger.Logger + configCMD config.ConfigCMD - // Initialize database connection - database, err := sql.Open("sqlite3", dbPath) - if err != nil { - log.Fatalf("Failed to open database: %v", err) - } - // Run migrations - if err := runMigrations(database, configCMD.MigrationsFS); err != nil { - log.Fatalf("Failed to run migrations: %v", err) - } + port int + dbPath string + tlsCertFile string + tlsKeyFile string + dataPath string + dev bool - // Create queries instance - queries = db.New(database) - }, - Run: func(cmd *cobra.Command, args []string) { - // Initialize encryption key - encryptionKey, err := ensureKeyExists(encryptionKeyFile) - if err != nil { - log.Fatalf("Failed to initialize encryption key: %v", err) - } - if err := os.Setenv("KEY_ENCRYPTION_KEY", encryptionKey); err != nil { - log.Fatalf("Failed to set encryption key environment variable: %v", err) - } + queries *db.Queries +} - // Initialize session key - sessionKey, err := ensureKeyExists(sessionKeyFile) - if err != nil { - log.Fatalf("Failed to initialize session key: %v", err) - } - if err := os.Setenv("SESSION_ENCRYPTION_KEY", sessionKey); err != nil { - log.Fatalf("Failed to set session key environment variable: %v", err) - } +// validate validates the serve command configuration +func (c *serveCmd) validate() error { + if c.port <= 0 || c.port > 65535 { + return fmt.Errorf("invalid port number: %d", c.port) + } - fmt.Printf("Starting server on port %d...\n", port) - fmt.Printf("Using database: %s\n", dbPath) - if dev { - fmt.Println("Running in development mode") - } else { - fmt.Println("Running in production mode") - } + if c.dbPath == "" { + return fmt.Errorf("database path 
cannot be empty") + } - // Initialize auth service with database - authService := auth.NewAuthService(queries) + // If TLS is configured, both cert and key files must be provided + if (c.tlsCertFile != "" && c.tlsKeyFile == "") || (c.tlsCertFile == "" && c.tlsKeyFile != "") { + return fmt.Errorf("both TLS certificate and key files must be provided") + } - // Check if any users exist - users, err := authService.ListUsers(context.Background()) - if err != nil { - log.Fatalf("Failed to check existing users: %v", err) - } + // If TLS files are provided, verify they exist + if c.tlsCertFile != "" { + if _, err := os.Stat(c.tlsCertFile); os.IsNotExist(err) { + return fmt.Errorf("TLS certificate file not found: %s", c.tlsCertFile) + } + } + if c.tlsKeyFile != "" { + if _, err := os.Stat(c.tlsKeyFile); os.IsNotExist(err) { + return fmt.Errorf("TLS key file not found: %s", c.tlsKeyFile) + } + } - if len(users) == 0 { - // No users exist, check for required environment variables - username := os.Getenv("CHAINLAUNCH_USER") - password := os.Getenv("CHAINLAUNCH_PASSWORD") + // Ensure data path exists or can be created + if c.dataPath != "" { + if err := os.MkdirAll(c.dataPath, 0755); err != nil { + return fmt.Errorf("failed to create data directory: %v", err) + } + } - if username == "" || password == "" { - log.Fatal("No users found in database. 
CHAINLAUNCH_USER and CHAINLAUNCH_PASSWORD environment variables must be set for initial user creation") - } + return nil +} - // Create initial user with provided credentials - if err := authService.CreateUser(context.Background(), username, password); err != nil { - log.Fatalf("Failed to create initial user: %v", err) - } - log.Printf("Created initial user with username: %s", username) - } +func (c *serveCmd) preRun() error { + // Ensure the database directory exists + dbDir := filepath.Dir(c.dbPath) + if err := os.MkdirAll(dbDir, 0755); err != nil { + log.Fatalf("Failed to create database directory: %v", err) + } - // Setup and start HTTP server - router := setupServer(queries, authService, configCMD.Views) + // Convert dataPath to absolute path if it's not empty + if c.dataPath != "" { + absPath, err := filepath.Abs(c.dataPath) + if err != nil { + return fmt.Errorf("failed to get absolute path for data directory: %v", err) + } + c.dataPath = absPath + } - // Start HTTP server in a goroutine - httpServer := &http.Server{ - Addr: fmt.Sprintf(":%d", port), - Handler: router, - } + // Initialize database connection + database, err := sql.Open("sqlite3", c.dbPath) + if err != nil { + log.Fatalf("Failed to open database: %v", err) + } + // Run migrations + if err := runMigrations(database, c.configCMD.MigrationsFS); err != nil { + log.Fatalf("Failed to run migrations: %v", err) + } - isTLS := tlsCertFile != "" && tlsKeyFile != "" - // Check if TLS cert and key files exist - if isTLS { - if _, err := os.Stat(tlsCertFile); os.IsNotExist(err) { - log.Fatalf("TLS certificate file not found: %s", tlsCertFile) - } - if _, err := os.Stat(tlsKeyFile); os.IsNotExist(err) { - log.Fatalf("TLS key file not found: %s", tlsKeyFile) - } - } - if isTLS { - logger.Infof("HTTPS server listening on :%d", port) - err = httpServer.ListenAndServeTLS(tlsCertFile, tlsKeyFile) - } else { - logger.Infof("HTTP server listening on :%d", port) - err = httpServer.ListenAndServe() - } + // Create 
queries instance + c.queries = db.New(database) + + return nil +} + +func (c *serveCmd) run() error { + // Initialize encryption key with dataPath + encryptionKey, err := ensureKeyExists(encryptionKeyFile, c.dataPath) + if err != nil { + log.Fatalf("Failed to initialize encryption key: %v", err) + } + if err := os.Setenv("KEY_ENCRYPTION_KEY", encryptionKey); err != nil { + log.Fatalf("Failed to set encryption key environment variable: %v", err) + } + + // Initialize session key with dataPath + sessionKey, err := ensureKeyExists(sessionKeyFile, c.dataPath) + if err != nil { + log.Fatalf("Failed to initialize session key: %v", err) + } + if err := os.Setenv("SESSION_ENCRYPTION_KEY", sessionKey); err != nil { + log.Fatalf("Failed to set session key environment variable: %v", err) + } + + c.logger.Infof("Starting server on port %d...", c.port) + c.logger.Infof("Using database: %s", c.dbPath) + if c.dev { + c.logger.Info("Running in development mode") + } else { + c.logger.Info("Running in production mode") + } + + // Initialize auth service with database + authService := auth.NewAuthService(c.queries) + + // Check if any users exist + users, err := authService.ListUsers(context.Background()) + if err != nil { + log.Fatalf("Failed to check existing users: %v", err) + } + + // Get environment variables + username := os.Getenv("CHAINLAUNCH_USER") + password := os.Getenv("CHAINLAUNCH_PASSWORD") + + if len(users) == 0 { + // No users exist, check for required environment variables + if username == "" || password == "" { + log.Fatal("No users found in database. 
CHAINLAUNCH_USER and CHAINLAUNCH_PASSWORD environment variables must be set for initial user creation") + } + + // Create initial user with provided credentials + if err := authService.CreateUser(context.Background(), username, password); err != nil { + log.Fatalf("Failed to create initial user: %v", err) + } + log.Printf("Created initial user with username: %s", username) + } else if password != "" { + // If password is set and users exist, update the first user's password + if err := authService.UpdateUserPassword(context.Background(), users[0].Username, password); err != nil { + log.Fatalf("Failed to update user password: %v", err) + } + log.Printf("Updated password for user: %s", users[0].Username) + } + + // Setup and start HTTP server + router := setupServer(c.queries, authService, c.configCMD.Views, c.dev, c.dbPath, c.dataPath) + + // Start HTTP server in a goroutine + httpServer := &http.Server{ + Addr: fmt.Sprintf(":%d", c.port), + Handler: router, + } + + // Check if TLS cert and key files exist + if c.tlsCertFile != "" && c.tlsKeyFile != "" { + c.logger.Infof("HTTPS server listening on :%d", c.port) + err = httpServer.ListenAndServeTLS(c.tlsCertFile, c.tlsKeyFile) + } else { + c.logger.Infof("HTTP server listening on :%d", c.port) + err = httpServer.ListenAndServe() + } - if err != nil && err != http.ErrServerClosed { - log.Fatalf("Failed to start HTTP server: %v", err) + if err != nil && err != http.ErrServerClosed { + log.Fatalf("Failed to start HTTP server: %v", err) + } + + return nil +} + +func (c *serveCmd) postRun() error { + // do nothing + return nil +} + +// Command returns the serve command +func Command(configCMD config.ConfigCMD, logger *logger.Logger) *cobra.Command { + serveCmd := &serveCmd{ + configCMD: configCMD, + logger: logger, + } + cmd := &cobra.Command{ + Use: "serve", + Short: "Start the API server", + Long: `Start the HTTP API server on the specified port. 
+For example: + chainlaunch serve --port 8100`, + PreRunE: func(cmd *cobra.Command, args []string) error { + if err := serveCmd.validate(); err != nil { + return err } + return serveCmd.preRun() }, - PostRun: func(cmd *cobra.Command, args []string) { - // Clean up database connection - if queries != nil { - if err := queries.Close(); err != nil { - log.Printf("Error closing database connection: %v", err) - } - } + RunE: func(cmd *cobra.Command, args []string) error { + return serveCmd.run() + }, + PostRunE: func(cmd *cobra.Command, args []string) error { + return serveCmd.postRun() }, } // Add port flags - serveCmd.Flags().IntVarP(&port, "port", "p", 8100, "Port to run the HTTP server on") + cmd.Flags().IntVarP(&serveCmd.port, "port", "p", 8100, "Port to run the HTTP server on") // Add database path flag defaultDBPath := filepath.Join("data", "chainlaunch.db") - serveCmd.Flags().StringVar(&dbPath, "db", defaultDBPath, "Path to SQLite database file") + cmd.Flags().StringVar(&serveCmd.dbPath, "db", defaultDBPath, "Path to SQLite database file") // Add HTTP TLS configuration flags - serveCmd.Flags().StringVar(&tlsCertFile, "tls-cert", "", "Path to TLS certificate file for HTTP server (required)") - serveCmd.Flags().StringVar(&tlsKeyFile, "tls-key", "", "Path to TLS key file for HTTP server (required)") + cmd.Flags().StringVar(&serveCmd.tlsCertFile, "tls-cert", "", "Path to TLS certificate file for HTTP server (required)") + cmd.Flags().StringVar(&serveCmd.tlsKeyFile, "tls-key", "", "Path to TLS key file for HTTP server (required)") + + // Update the default data path to use the OS-specific user config directory + defaultDataPath := "" + if configDir, err := os.UserConfigDir(); err == nil { + defaultDataPath = filepath.Join(configDir, "chainlaunch") + } else { + // Fallback to home directory if UserConfigDir fails + if homeDir, err := os.UserHomeDir(); err == nil { + defaultDataPath = filepath.Join(homeDir, ".chainlaunch") + } + } + 
cmd.Flags().StringVar(&serveCmd.dataPath, "data", defaultDataPath, "Path to data directory") // Add development mode flag - serveCmd.Flags().BoolVar(&dev, "dev", false, "Run in development mode") + cmd.Flags().BoolVar(&serveCmd.dev, "dev", false, "Run in development mode") - return serveCmd + return cmd } diff --git a/docs/docs.go b/docs/docs.go index 3ea376a..7d933f1 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -1,4 +1,4 @@ -// Package docs Code generated by swaggo/swag at 2025-03-17 22:16:01.61695 +0100 CET m=+1.512738168. DO NOT EDIT +// Package docs Code generated by swaggo/swag at 2025-04-21 14:16:51.474458 +0200 CEST m=+1.670574834. DO NOT EDIT package docs import "github.com/swaggo/swag" @@ -12,8 +12,8 @@ const docTemplate = `{ "termsOfService": "http://swagger.io/terms/", "contact": { "name": "API Support", - "url": "http://www.chainlaunch.com/support", - "email": "support@chainlaunch.com" + "url": "http://chainlaunch.dev/support", + "email": "support@chainlaunch.dev" }, "license": { "name": "Apache 2.0", @@ -151,7 +151,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backups" + "Backups" ], "summary": "List all backups", "responses": { @@ -181,7 +181,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backups" + "Backups" ], "summary": "Create a new backup", "parameters": [ @@ -227,7 +227,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "List all backup schedules", "responses": { @@ -257,7 +257,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Create a new backup schedule", "parameters": [ @@ -303,7 +303,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Get a backup schedule by ID", "parameters": [ @@ -351,7 +351,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": 
"Update a backup schedule", "parameters": [ @@ -408,7 +408,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Delete a backup schedule", "parameters": [ @@ -455,7 +455,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Disable a backup schedule", "parameters": [ @@ -505,7 +505,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Enable a backup schedule", "parameters": [ @@ -555,7 +555,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "List all backup targets", "responses": { @@ -585,7 +585,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "Create a new backup target", "parameters": [ @@ -631,7 +631,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "Get a backup target by ID", "parameters": [ @@ -679,7 +679,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "Update a backup target", "parameters": [ @@ -736,7 +736,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "Delete a backup target", "parameters": [ @@ -783,7 +783,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backups" + "Backups" ], "summary": "Get a backup by ID", "parameters": [ @@ -831,7 +831,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "backups" + "Backups" ], "summary": "Delete a backup", "parameters": [ @@ -878,7 +878,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Submit config update proposal", "parameters": [ @@ -965,7 +965,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "providers" + 
"Providers" ], "summary": "List all key providers", "responses": { @@ -998,7 +998,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "providers" + "Providers" ], "summary": "Create a new key provider", "parameters": [ @@ -1050,7 +1050,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "providers" + "Providers" ], "summary": "Get a specific provider", "parameters": [ @@ -1107,7 +1107,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "providers" + "Providers" ], "summary": "Delete a provider", "parameters": [ @@ -1172,7 +1172,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Get paginated keys", "parameters": [ @@ -1223,7 +1223,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Create a new key", "parameters": [ @@ -1275,7 +1275,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Get all keys", "responses": { @@ -1310,7 +1310,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Filter keys by algorithm and curve", "parameters": [ @@ -1379,7 +1379,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Get a specific key by ID", "parameters": [ @@ -1436,7 +1436,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Delete a key", "parameters": [ @@ -1492,7 +1492,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Sign a certificate", "parameters": [ @@ -1557,7 +1557,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "List Besu networks", "parameters": [ @@ -1604,7 +1604,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "Create a new Besu network", "parameters": [ @@ -1619,8 +1619,8 @@ const docTemplate = `{ } ], "responses": { - "201": { - 
"description": "Created", + "200": { + "description": "OK", "schema": { "$ref": "#/definitions/http.BesuNetworkResponse" } @@ -1650,7 +1650,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "Import a Besu network", "parameters": [ @@ -1693,7 +1693,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "Get a Besu network by ID", "parameters": [ @@ -1738,7 +1738,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "Delete a Besu network", "parameters": [ @@ -1782,7 +1782,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "List Fabric networks", "parameters": [ @@ -1829,7 +1829,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Create a new Fabric network", "parameters": [ @@ -1872,7 +1872,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get a Fabric network by slug", "parameters": [ @@ -1922,7 +1922,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Import a Fabric network", "parameters": [ @@ -1968,7 +1968,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Import a Fabric network with organization", "parameters": [ @@ -2011,7 +2011,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get a Fabric network by ID", "parameters": [ @@ -2056,7 +2056,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Delete a Fabric network", "parameters": [ @@ -2103,7 +2103,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], 
"summary": "Set anchor peers for an organization", "parameters": [ @@ -2146,6 +2146,125 @@ const docTemplate = `{ } } }, + "/networks/fabric/{id}/blocks": { + "get": { + "description": "Get a paginated list of blocks from a Fabric network", + "produces": [ + "application/json" + ], + "tags": [ + "Fabric Networks" + ], + "summary": "Get list of blocks from Fabric network", + "parameters": [ + { + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "integer", + "description": "Number of blocks to return (default: 10)", + "name": "limit", + "in": "query" + }, + { + "type": "integer", + "description": "Number of blocks to skip (default: 0)", + "name": "offset", + "in": "query" + }, + { + "type": "boolean", + "description": "Get blocks in reverse order (default: false)", + "name": "reverse", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.BlockListResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, + "/networks/fabric/{id}/blocks/{blockNum}": { + "get": { + "description": "Get all transactions from a specific block in a Fabric network", + "produces": [ + "application/json" + ], + "tags": [ + "Fabric Networks" + ], + "summary": "Get transactions from a specific block", + "parameters": [ + { + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "integer", + "description": "Block Number", + "name": "blockNum", + 
"in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.BlockTransactionsResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, "/networks/fabric/{id}/channel-config": { "get": { "description": "Retrieve the channel configuration for a Fabric network", @@ -2153,7 +2272,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get Fabric network channel configuration", "parameters": [ @@ -2194,7 +2313,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get Fabric network current channel configuration", "parameters": [ @@ -2228,6 +2347,56 @@ const docTemplate = `{ } } }, + "/networks/fabric/{id}/info": { + "get": { + "description": "Retrieve detailed information about the Fabric blockchain including height and block hashes", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Fabric Networks" + ], + "summary": "Get Fabric chain information", + "parameters": [ + { + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.ChainInfoResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + 
"description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, "/networks/fabric/{id}/nodes": { "get": { "description": "Get all nodes associated with a network", @@ -2235,7 +2404,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get network nodes", "parameters": [ @@ -2283,7 +2452,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Add node to network", "parameters": [ @@ -2336,7 +2505,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Remove orderer from Fabric network", "parameters": [ @@ -2387,7 +2556,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Join orderer to Fabric network", "parameters": [ @@ -2438,7 +2607,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Unjoin orderer from Fabric network", "parameters": [ @@ -2479,6 +2648,65 @@ const docTemplate = `{ } } }, + "/networks/fabric/{id}/organization-crl": { + "post": { + "description": "Update the Certificate Revocation List (CRL) for an organization in the network", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Fabric Networks" + ], + "summary": "Update organization CRL", + "parameters": [ + { + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Organization CRL update request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": 
"#/definitions/http.UpdateOrganizationCRLRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.UpdateOrganizationCRLResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, "/networks/fabric/{id}/organizations/{orgId}/config": { "get": { "description": "Get the network configuration as YAML", @@ -2486,7 +2714,7 @@ const docTemplate = `{ "text/yaml" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get network configuration", "parameters": [ @@ -2543,7 +2771,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Remove peer from Fabric network", "parameters": [ @@ -2594,7 +2822,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Join peer to Fabric network", "parameters": [ @@ -2645,7 +2873,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Unjoin peer from Fabric network", "parameters": [ @@ -2696,7 +2924,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Reload network config block", "parameters": [ @@ -2748,64 +2976,63 @@ const docTemplate = `{ } } }, - "/nodes": { + "/networks/fabric/{id}/transactions/{txId}": { "get": { - "description": "Get a paginated list of nodes with optional platform filter", - "consumes": [ - "application/json" - ], + "description": "Get detailed information 
about a specific transaction in a Fabric network", "produces": [ "application/json" ], "tags": [ - "nodes" + "Fabric Networks" ], - "summary": "List all nodes", + "summary": "Get transaction details by transaction ID", "parameters": [ - { - "type": "string", - "description": "Filter by blockchain platform", - "name": "platform", - "in": "query" - }, { "type": "integer", - "default": 1, - "description": "Page number", - "name": "page", - "in": "query" + "description": "Network ID", + "name": "id", + "in": "path", + "required": true }, { - "type": "integer", - "default": 10, - "description": "Items per page", - "name": "limit", - "in": "query" + "type": "string", + "description": "Transaction ID", + "name": "txId", + "in": "path", + "required": true } ], "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/http.PaginatedNodesResponse" + "$ref": "#/definitions/http.TransactionResponse" } }, "400": { - "description": "Validation error", + "description": "Bad Request", "schema": { - "$ref": "#/definitions/response.ErrorResponse" + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" } }, "500": { - "description": "Internal server error", + "description": "Internal Server Error", "schema": { - "$ref": "#/definitions/response.ErrorResponse" + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" } } } - }, + } + }, + "/networks/fabric/{id}/update-config": { "post": { - "description": "Create a new node with the specified configuration", + "description": "Prepare a config update proposal for a Fabric network using the provided operations.\nThe following operation types are supported:\n- add_org: Add a new organization to the channel\n- remove_org: Remove an organization from the channel\n- update_org_msp: Update an 
organization's MSP configuration\n- set_anchor_peers: Set anchor peers for an organization\n- add_consenter: Add a new consenter to the orderer\n- remove_consenter: Remove a consenter from the orderer\n- update_consenter: Update a consenter in the orderer\n- update_etcd_raft_options: Update etcd raft options for the orderer\n- update_batch_size: Update batch size for the orderer\n- update_batch_timeout: Update batch timeout for the orderer", "consumes": [ "application/json" ], @@ -2813,15 +3040,123 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Fabric Networks" ], - "summary": "Create a new node", + "summary": "Prepare a config update for a Fabric network", "parameters": [ { - "description": "Node creation request", - "name": "request", - "in": "body", - "required": true, + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Config update operations", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.UpdateFabricNetworkRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.ConfigUpdateResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, + "/nodes": { + "get": { + "description": "Get a paginated list of nodes with optional platform filter", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Nodes" + ], + "summary": "List all nodes", + "parameters": [ + { + "type": "string", + "description": "Filter by blockchain platform", + "name": "platform", + "in": "query" + }, + { + "type": "integer", + "default": 1, + 
"description": "Page number", + "name": "page", + "in": "query" + }, + { + "type": "integer", + "default": 10, + "description": "Items per page", + "name": "limit", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.PaginatedNodesResponse" + } + }, + "400": { + "description": "Validation error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "500": { + "description": "Internal server error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + } + } + }, + "post": { + "description": "Create a new node with the specified configuration", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Nodes" + ], + "summary": "Create a new node", + "parameters": [ + { + "description": "Node creation request", + "name": "request", + "in": "body", + "required": true, "schema": { "$ref": "#/definitions/http.CreateNodeRequest" } @@ -2856,14 +3191,27 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get default values for Besu node", + "parameters": [ + { + "minimum": 0, + "type": "integer", + "default": 1, + "description": "Number of Besu nodes", + "name": "besuNodes", + "in": "query" + } + ], "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/service.BesuNodeDefaults" + "type": "array", + "items": { + "$ref": "#/definitions/http.BesuNodeDefaultsResponse" + } } }, "500": { @@ -2882,7 +3230,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get default values for multiple Fabric nodes", "parameters": [ @@ -2943,7 +3291,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get default values for Fabric orderer node", "responses": { @@ -2969,7 +3317,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get default values for Fabric 
peer node", "responses": { @@ -2998,7 +3346,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "List nodes by platform", "parameters": [ @@ -3060,7 +3408,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get a node", "parameters": [ @@ -3099,8 +3447,8 @@ const docTemplate = `{ } } }, - "delete": { - "description": "Delete a node by ID", + "put": { + "description": "Updates an existing node's configuration based on its type", "consumes": [ "application/json" ], @@ -3108,9 +3456,9 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Delete a node", + "summary": "Update a node", "parameters": [ { "type": "integer", @@ -3118,11 +3466,23 @@ const docTemplate = `{ "name": "id", "in": "path", "required": true + }, + { + "description": "Update node request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.UpdateNodeRequest" + } } ], "responses": { - "204": { - "description": "No Content" + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.NodeResponse" + } }, "400": { "description": "Validation error", @@ -3143,11 +3503,9 @@ const docTemplate = `{ } } } - } - }, - "/nodes/{id}/events": { - "get": { - "description": "Get a paginated list of events for a specific node", + }, + "delete": { + "description": "Delete a node by ID", "consumes": [ "application/json" ], @@ -3155,9 +3513,9 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Get node events", + "summary": "Delete a node", "parameters": [ { "type": "integer", @@ -3165,28 +3523,11 @@ const docTemplate = `{ "name": "id", "in": "path", "required": true - }, - { - "type": "integer", - "default": 1, - "description": "Page number", - "name": "page", - "in": "query" - }, - { - "type": "integer", - "default": 10, - "description": "Items per page", - "name": "limit", - "in": 
"query" } ], "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/http.PaginatedNodeEventsResponse" - } + "204": { + "description": "No Content" }, "400": { "description": "Validation error", @@ -3209,19 +3550,19 @@ const docTemplate = `{ } } }, - "/nodes/{id}/logs": { - "get": { - "description": "Stream logs from a specific node", + "/nodes/{id}/certificates/renew": { + "post": { + "description": "Renews the TLS and signing certificates for a Fabric node", "consumes": [ "application/json" ], "produces": [ - "text/event-stream" + "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Tail node logs", + "summary": "Renew node certificates", "parameters": [ { "type": "integer", @@ -3229,27 +3570,13 @@ const docTemplate = `{ "name": "id", "in": "path", "required": true - }, - { - "type": "boolean", - "default": false, - "description": "Follow logs", - "name": "follow", - "in": "query" - }, - { - "type": "integer", - "default": 100, - "description": "Number of lines to show from the end", - "name": "tail", - "in": "query" } ], "responses": { "200": { - "description": "Log stream", + "description": "OK", "schema": { - "type": "string" + "$ref": "#/definitions/http.NodeResponse" } }, "400": { @@ -3273,9 +3600,9 @@ const docTemplate = `{ } } }, - "/nodes/{id}/restart": { - "post": { - "description": "Restart a node by ID (stops and starts the node)", + "/nodes/{id}/channels": { + "get": { + "description": "Retrieves all channels for a specific Fabric node", "consumes": [ "application/json" ], @@ -3283,9 +3610,9 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Restart a node", + "summary": "Get channels for a Fabric node", "parameters": [ { "type": "integer", @@ -3299,7 +3626,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/http.NodeResponse" + "$ref": "#/definitions/http.NodeChannelsResponse" } }, "400": { @@ -3323,9 +3650,9 @@ const 
docTemplate = `{ } } }, - "/nodes/{id}/start": { - "post": { - "description": "Start a node by ID", + "/nodes/{id}/events": { + "get": { + "description": "Get a paginated list of events for a specific node", "consumes": [ "application/json" ], @@ -3333,9 +3660,9 @@ const docTemplate = `{ "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Start a node", + "summary": "Get node events", "parameters": [ { "type": "integer", @@ -3343,13 +3670,27 @@ const docTemplate = `{ "name": "id", "in": "path", "required": true + }, + { + "type": "integer", + "default": 1, + "description": "Page number", + "name": "page", + "in": "query" + }, + { + "type": "integer", + "default": 10, + "description": "Items per page", + "name": "limit", + "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/http.NodeResponse" + "$ref": "#/definitions/http.PaginatedNodeEventsResponse" } }, "400": { @@ -3373,19 +3714,19 @@ const docTemplate = `{ } } }, - "/nodes/{id}/stop": { - "post": { - "description": "Stop a node by ID", + "/nodes/{id}/logs": { + "get": { + "description": "Stream logs from a specific node", "consumes": [ "application/json" ], "produces": [ - "application/json" + "text/event-stream" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Stop a node", + "summary": "Tail node logs", "parameters": [ { "type": "integer", @@ -3393,13 +3734,27 @@ const docTemplate = `{ "name": "id", "in": "path", "required": true + }, + { + "type": "boolean", + "default": false, + "description": "Follow logs", + "name": "follow", + "in": "query" + }, + { + "type": "integer", + "default": 100, + "description": "Number of lines to show from the end", + "name": "tail", + "in": "query" } ], "responses": { "200": { - "description": "OK", + "description": "Log stream", "schema": { - "$ref": "#/definitions/http.NodeResponse" + "type": "string" } }, "400": { @@ -3423,9 +3778,9 @@ const docTemplate = `{ } } }, - "/notifications/providers": { - "get": { 
- "description": "Get a list of all notification providers", + "/nodes/{id}/restart": { + "post": { + "description": "Restart a node by ID (stops and starts the node)", "consumes": [ "application/json" ], @@ -3433,7 +3788,157 @@ const docTemplate = `{ "application/json" ], "tags": [ - "notifications" + "Nodes" + ], + "summary": "Restart a node", + "parameters": [ + { + "type": "integer", + "description": "Node ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.NodeResponse" + } + }, + "400": { + "description": "Validation error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "404": { + "description": "Node not found", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "500": { + "description": "Internal server error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + } + } + } + }, + "/nodes/{id}/start": { + "post": { + "description": "Start a node by ID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Nodes" + ], + "summary": "Start a node", + "parameters": [ + { + "type": "integer", + "description": "Node ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.NodeResponse" + } + }, + "400": { + "description": "Validation error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "404": { + "description": "Node not found", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "500": { + "description": "Internal server error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + } + } + } + }, + "/nodes/{id}/stop": { + "post": { + "description": "Stop a node by ID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Nodes" + ], + 
"summary": "Stop a node", + "parameters": [ + { + "type": "integer", + "description": "Node ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.NodeResponse" + } + }, + "400": { + "description": "Validation error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "404": { + "description": "Node not found", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "500": { + "description": "Internal server error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + } + } + } + }, + "/notifications/providers": { + "get": { + "description": "Get a list of all notification providers", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifications" ], "summary": "List notification providers", "responses": { @@ -3463,7 +3968,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Create a notification provider", "parameters": [ @@ -3509,7 +4014,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Get a notification provider", "parameters": [ @@ -3557,7 +4062,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Update a notification provider", "parameters": [ @@ -3614,7 +4119,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Delete a notification provider", "parameters": [ @@ -3661,7 +4166,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Test a notification provider", "parameters": [ @@ -3714,7 +4219,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "List all Fabric organizations", "responses": { @@ -3747,7 
+4252,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Create a new Fabric organization", "parameters": [ @@ -3799,7 +4304,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Get a Fabric organization by MSP ID", "parameters": [ @@ -3849,7 +4354,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Get a Fabric organization", "parameters": [ @@ -3897,7 +4402,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Update a Fabric organization", "parameters": [ @@ -3963,7 +4468,7 @@ const docTemplate = `{ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Delete a Fabric organization", "parameters": [ @@ -3999,117 +4504,405 @@ const docTemplate = `{ } } } - } - }, - "definitions": { - "auth.LoginRequest": { - "type": "object", - "properties": { - "password": { - "type": "string" - }, - "username": { - "type": "string" - } - } }, - "auth.LoginResponse": { - "description": "Login response", - "type": "object", - "properties": { - "message": { - "description": "Success message\n@Example \"Login successful\"", - "type": "string" - } - } - }, - "auth.LogoutResponse": { - "description": "Logout response", - "type": "object", - "properties": { - "message": { - "description": "Success message\n@Example \"Logout successful\"", - "type": "string" - } - } - }, - "auth.UserResponse": { - "description": "User information response", - "type": "object", - "properties": { - "created_at": { - "description": "Time when the user was created\n@Example \"2024-01-01T00:00:00Z\"", - "type": "string" - }, - "last_login_at": { - "description": "Last time the user logged in\n@Example \"2024-01-01T12:34:56Z\"", - "type": "string" - }, - "username": { - "description": "Username of the user\n@Example \"admin\"", - "type": "string" + 
"/organizations/{id}/crl": { + "get": { + "description": "Get the current Certificate Revocation List for the organization", + "consumes": [ + "application/json" + ], + "produces": [ + "application/x-pem-file" + ], + "tags": [ + "Organizations" + ], + "summary": "Get organization's CRL", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "PEM encoded CRL", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } } } }, - "crypto_x509.ExtKeyUsage": { - "type": "integer", - "enum": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13 - ], - "x-enum-varnames": [ - "ExtKeyUsageAny", - "ExtKeyUsageServerAuth", - "ExtKeyUsageClientAuth", - "ExtKeyUsageCodeSigning", - "ExtKeyUsageEmailProtection", - "ExtKeyUsageIPSECEndSystem", - "ExtKeyUsageIPSECTunnel", - "ExtKeyUsageIPSECUser", - "ExtKeyUsageTimeStamping", - "ExtKeyUsageOCSPSigning", - "ExtKeyUsageMicrosoftServerGatedCrypto", - "ExtKeyUsageNetscapeServerGatedCrypto", - "ExtKeyUsageMicrosoftCommercialCodeSigning", - "ExtKeyUsageMicrosoftKernelCodeSigning" - ] + "/organizations/{id}/crl/revoke/pem": { + "post": { + "description": "Add a certificate to the organization's CRL using its PEM encoded data", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Revoke a certificate using PEM data", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Certificate revocation request", + "name": "request", + "in": "body", + "required": 
true, + "schema": { + "$ref": "#/definitions/handler.RevokeCertificateByPEMRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } }, - "crypto_x509.KeyUsage": { - "type": "integer", - "enum": [ - 1, - 2, - 4, - 8, - 16, - 32, - 64, - 128, - 256 - ], - "x-enum-varnames": [ - "KeyUsageDigitalSignature", - "KeyUsageContentCommitment", - "KeyUsageKeyEncipherment", - "KeyUsageDataEncipherment", - "KeyUsageKeyAgreement", - "KeyUsageCertSign", - "KeyUsageCRLSign", - "KeyUsageEncipherOnly", - "KeyUsageDecipherOnly" - ] + "/organizations/{id}/crl/revoke/serial": { + "post": { + "description": "Add a certificate to the organization's CRL using its serial number", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Revoke a certificate using its serial number", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Certificate revocation request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/handler.RevokeCertificateBySerialRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": 
"string" + } + } + } + } + }, + "delete": { + "description": "Remove a certificate from the organization's CRL using its serial number", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Delete a revoked certificate using its serial number", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Certificate deletion request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/handler.DeleteRevokedCertificateRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/organizations/{id}/revoked-certificates": { + "get": { + "description": "Get all revoked certificates for the organization", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Get organization's revoked certificates", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/handler.RevokedCertificateResponse" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": 
{ + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/settings": { + "get": { + "description": "Get the default setting's details", + "produces": [ + "application/json" + ], + "tags": [ + "Settings" + ], + "summary": "Get the default setting", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/service.Setting" + } + } + } + }, + "post": { + "description": "Create or update the default setting with the provided configuration", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Settings" + ], + "summary": "Create or update the default setting", + "parameters": [ + { + "description": "Setting configuration", + "name": "setting", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/service.CreateSettingParams" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/service.Setting" + } + } + } + } + } + }, + "definitions": { + "auth.LoginRequest": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "auth.LoginResponse": { + "description": "Login response", + "type": "object", + "properties": { + "message": { + "description": "Success message\n@Example \"Login successful\"", + "type": "string" + } + } + }, + "auth.LogoutResponse": { + "description": "Logout response", + "type": "object", + "properties": { + "message": { + "description": "Success message\n@Example \"Logout successful\"", + "type": "string" + } + } + }, + "auth.UserResponse": { + "description": "User information response", + "type": "object", + "properties": { + "created_at": { + "description": "Time when the user was created\n@Example \"2024-01-01T00:00:00Z\"", + "type": "string" + }, + "last_login_at": { + "description": "Last time the user logged in\n@Example 
\"2024-01-01T12:34:56Z\"", + "type": "string" + }, + "username": { + "description": "Username of the user\n@Example \"admin\"", + "type": "string" + } + } }, "github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse": { "type": "object", @@ -4154,6 +4947,15 @@ const docTemplate = `{ } } }, + "handler.DeleteRevokedCertificateRequest": { + "type": "object", + "properties": { + "serialNumber": { + "description": "Hex string of the serial number", + "type": "string" + } + } + }, "handler.OrganizationResponse": { "type": "object", "properties": { @@ -4192,6 +4994,44 @@ const docTemplate = `{ } } }, + "handler.RevokeCertificateByPEMRequest": { + "type": "object", + "properties": { + "certificate": { + "description": "PEM encoded certificate", + "type": "string" + }, + "revocationReason": { + "type": "integer" + } + } + }, + "handler.RevokeCertificateBySerialRequest": { + "type": "object", + "properties": { + "revocationReason": { + "type": "integer" + }, + "serialNumber": { + "description": "Hex string of the serial number", + "type": "string" + } + } + }, + "handler.RevokedCertificateResponse": { + "type": "object", + "properties": { + "reason": { + "type": "integer" + }, + "revocationTime": { + "type": "string" + }, + "serialNumber": { + "type": "string" + } + } + }, "handler.UpdateOrganizationRequest": { "type": "object", "properties": { @@ -4352,98 +5192,230 @@ const docTemplate = `{ "targetId": { "type": "integer" }, - "updatedAt": { + "updatedAt": { + "type": "string" + } + } + }, + "http.BackupTargetResponse": { + "type": "object", + "properties": { + "accessKeyId": { + "type": "string" + }, + "bucketName": { + "type": "string" + }, + "bucketPath": { + "type": "string" + }, + "createdAt": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "forcePathStyle": { + "type": "boolean" + }, + "id": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "region": { + "type": "string" + }, + "type": { + "type": "string" + }, + 
"updatedAt": { + "type": "string" + } + } + }, + "http.BesuNetworkResponse": { + "type": "object", + "properties": { + "chainId": { + "type": "integer" + }, + "config": { + "type": "array", + "items": { + "type": "integer" + } + }, + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "genesisConfig": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "platform": { + "type": "string" + }, + "status": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "http.BesuNodeDefaultsResponse": { + "type": "object", + "properties": { + "defaults": { + "type": "array", + "items": { + "$ref": "#/definitions/service.BesuNodeDefaults" + } + }, + "nodeCount": { + "type": "integer" + } + } + }, + "http.BlockListResponse": { + "type": "object", + "properties": { + "blocks": { + "type": "array", + "items": { + "$ref": "#/definitions/service.Block" + } + }, + "total": { + "type": "integer" + } + } + }, + "http.BlockTransactionsResponse": { + "type": "object", + "properties": { + "block": { + "$ref": "#/definitions/service.Block" + }, + "transactions": { + "type": "array", + "items": { + "$ref": "#/definitions/service.Transaction" + } + } + } + }, + "http.ChainInfoResponse": { + "type": "object", + "properties": { + "currentBlockHash": { + "type": "string" + }, + "height": { + "type": "integer" + }, + "previousBlockHash": { "type": "string" } } }, - "http.BackupTargetResponse": { + "http.ChannelConfigResponse": { "type": "object", "properties": { - "accessKeyId": { - "type": "string" - }, - "bucketName": { - "type": "string" + "config": { + "type": "object", + "additionalProperties": true }, - "bucketPath": { + "name": { "type": "string" + } + } + }, + "http.ChannelResponse": { + "type": "object", + "properties": { + "blockNum": { + "type": "integer" }, "createdAt": { "type": "string" }, - "endpoint": { - "type": "string" - }, - "forcePathStyle": { - "type": "boolean" - }, - "id": { 
- "type": "integer" - }, "name": { "type": "string" - }, - "region": { - "type": "string" - }, - "type": { - "type": "string" - }, - "updatedAt": { - "type": "string" } } }, - "http.BesuNetworkResponse": { + "http.ConfigUpdateOperationRequest": { + "description": "A single configuration update operation", "type": "object", + "required": [ + "payload", + "type" + ], "properties": { - "chainId": { - "type": "integer" - }, - "config": { + "payload": { + "description": "Payload contains the operation-specific data\nThe structure depends on the operation type:\n- add_org: AddOrgPayload\n- remove_org: RemoveOrgPayload\n- update_org_msp: UpdateOrgMSPPayload\n- set_anchor_peers: SetAnchorPeersPayload\n- add_consenter: AddConsenterPayload\n- remove_consenter: RemoveConsenterPayload\n- update_consenter: UpdateConsenterPayload\n- update_etcd_raft_options: UpdateEtcdRaftOptionsPayload\n- update_batch_size: UpdateBatchSizePayload\n- update_batch_timeout: UpdateBatchTimeoutPayload\n@Description The payload for the configuration update operation\n@Description Can be one of:\n@Description - AddOrgPayload when type is \"add_org\"\n@Description - RemoveOrgPayload when type is \"remove_org\"\n@Description - UpdateOrgMSPPayload when type is \"update_org_msp\"\n@Description - SetAnchorPeersPayload when type is \"set_anchor_peers\"\n@Description - AddConsenterPayload when type is \"add_consenter\"\n@Description - RemoveConsenterPayload when type is \"remove_consenter\"\n@Description - UpdateConsenterPayload when type is \"update_consenter\"\n@Description - UpdateEtcdRaftOptionsPayload when type is \"update_etcd_raft_options\"\n@Description - UpdateBatchSizePayload when type is \"update_batch_size\"\n@Description - UpdateBatchTimeoutPayload when type is \"update_batch_timeout\"", "type": "array", "items": { "type": "integer" } }, - "createdAt": { + "type": { + "description": "Type is the type of configuration update operation\nenum: 
add_org,remove_org,update_org_msp,set_anchor_peers,add_consenter,remove_consenter,update_consenter,update_etcd_raft_options,update_batch_size,update_batch_timeout", + "type": "string", + "enum": [ + "add_org", + "remove_org", + "update_org_msp", + "set_anchor_peers", + "add_consenter", + "remove_consenter", + "update_consenter", + "update_etcd_raft_options", + "update_batch_size", + "update_batch_timeout" + ] + } + } + }, + "http.ConfigUpdateResponse": { + "type": "object", + "properties": { + "channel_name": { "type": "string" }, - "description": { + "created_at": { "type": "string" }, - "genesisConfig": { - "type": "array", - "items": { - "type": "integer" - } + "created_by": { + "type": "string" }, "id": { - "type": "integer" - }, - "name": { "type": "string" }, - "platform": { - "type": "string" + "network_id": { + "type": "integer" }, - "status": { - "type": "string" + "operations": { + "type": "array", + "items": { + "$ref": "#/definitions/http.ConfigUpdateOperationRequest" + } }, - "updatedAt": { + "preview_json": { "type": "string" - } - } - }, - "http.ChannelConfigResponse": { - "type": "object", - "properties": { - "config": { - "type": "object", - "additionalProperties": true }, - "name": { + "status": { "type": "string" } } @@ -4973,6 +5945,20 @@ const docTemplate = `{ } } }, + "http.NodeChannelsResponse": { + "type": "object", + "properties": { + "channels": { + "type": "array", + "items": { + "$ref": "#/definitions/http.ChannelResponse" + } + }, + "nodeId": { + "type": "integer" + } + } + }, "http.NodeEventResponse": { "type": "object", "properties": { @@ -5003,6 +5989,9 @@ const docTemplate = `{ "endpoint": { "type": "string" }, + "errorMessage": { + "type": "string" + }, "fabricOrderer": { "$ref": "#/definitions/service.FabricOrdererProperties" }, @@ -5246,6 +6235,14 @@ const docTemplate = `{ } } }, + "http.TransactionResponse": { + "type": "object", + "properties": { + "transaction": { + "$ref": "#/definitions/service.Transaction" + } + } + }, 
"http.UpdateBackupScheduleRequest": { "type": "object", "required": [ @@ -5350,6 +6347,51 @@ const docTemplate = `{ } } }, + "http.UpdateBesuNodeRequest": { + "type": "object", + "required": [ + "networkId", + "p2pHost", + "p2pPort", + "rpcHost", + "rpcPort" + ], + "properties": { + "bootnodes": { + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "externalIp": { + "type": "string" + }, + "internalIp": { + "type": "string" + }, + "networkId": { + "type": "integer" + }, + "p2pHost": { + "type": "string" + }, + "p2pPort": { + "type": "integer" + }, + "rpcHost": { + "type": "string" + }, + "rpcPort": { + "type": "integer" + } + } + }, "http.UpdateConsenterPayload": { "type": "object", "required": [ @@ -5380,38 +6422,152 @@ const docTemplate = `{ "maximum": 65535, "minimum": 1 }, - "server_tls_cert": { + "server_tls_cert": { + "type": "string" + } + } + }, + "http.UpdateEtcdRaftOptionsPayload": { + "type": "object", + "required": [ + "election_tick", + "heartbeat_tick", + "max_inflight_blocks", + "snapshot_interval_size", + "tick_interval" + ], + "properties": { + "election_tick": { + "type": "integer", + "minimum": 1 + }, + "heartbeat_tick": { + "type": "integer", + "minimum": 1 + }, + "max_inflight_blocks": { + "type": "integer", + "minimum": 1 + }, + "snapshot_interval_size": { + "type": "integer", + "minimum": 1 + }, + "tick_interval": { + "type": "string" + } + } + }, + "http.UpdateFabricNetworkRequest": { + "type": "object", + "required": [ + "operations" + ], + "properties": { + "operations": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/http.ConfigUpdateOperationRequest" + } + } + } + }, + "http.UpdateFabricOrdererRequest": { + "type": "object", + "properties": { + "adminAddress": { + "type": "string" + }, + "domainNames": { + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + 
"additionalProperties": { + "type": "string" + } + }, + "externalEndpoint": { + "type": "string" + }, + "listenAddress": { + "type": "string" + }, + "operationsListenAddress": { + "type": "string" + }, + "version": { + "type": "string" + } + } + }, + "http.UpdateFabricPeerRequest": { + "type": "object", + "properties": { + "addressOverrides": { + "type": "array", + "items": { + "$ref": "#/definitions/types.AddressOverride" + } + }, + "chaincodeAddress": { + "type": "string" + }, + "domainNames": { + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "eventsAddress": { + "type": "string" + }, + "externalEndpoint": { + "type": "string" + }, + "listenAddress": { + "type": "string" + }, + "operationsListenAddress": { + "type": "string" + }, + "version": { "type": "string" } } }, - "http.UpdateEtcdRaftOptionsPayload": { + "http.UpdateNodeRequest": { "type": "object", - "required": [ - "election_tick", - "heartbeat_tick", - "max_inflight_blocks", - "snapshot_interval_size", - "tick_interval" - ], "properties": { - "election_tick": { - "type": "integer", - "minimum": 1 + "besuNode": { + "$ref": "#/definitions/http.UpdateBesuNodeRequest" }, - "heartbeat_tick": { - "type": "integer", - "minimum": 1 + "blockchainPlatform": { + "$ref": "#/definitions/types.BlockchainPlatform" }, - "max_inflight_blocks": { - "type": "integer", - "minimum": 1 + "fabricOrderer": { + "$ref": "#/definitions/http.UpdateFabricOrdererRequest" }, - "snapshot_interval_size": { - "type": "integer", - "minimum": 1 + "fabricPeer": { + "description": "Platform-specific configurations", + "allOf": [ + { + "$ref": "#/definitions/http.UpdateFabricPeerRequest" + } + ] }, - "tick_interval": { + "name": { + "description": "Common fields", "type": "string" } } @@ -5443,6 +6599,25 @@ const docTemplate = `{ } } }, + "http.UpdateOrganizationCRLRequest": { + "type": "object", + "required": [ + "organizationId" + ], + 
"properties": { + "organizationId": { + "type": "integer" + } + } + }, + "http.UpdateOrganizationCRLResponse": { + "type": "object", + "properties": { + "transactionId": { + "type": "string" + } + } + }, "http.UpdateProviderRequest": { "type": "object", "required": [ @@ -5515,7 +6690,7 @@ const docTemplate = `{ "extKeyUsage": { "type": "array", "items": { - "$ref": "#/definitions/crypto_x509.ExtKeyUsage" + "$ref": "#/definitions/x509.ExtKeyUsage" } }, "ipAddresses": { @@ -5531,7 +6706,7 @@ const docTemplate = `{ "type": "boolean" }, "keyUsage": { - "$ref": "#/definitions/crypto_x509.KeyUsage" + "$ref": "#/definitions/x509.KeyUsage" }, "locality": { "type": "array", @@ -5815,6 +6990,9 @@ const docTemplate = `{ "sha256Fingerprint": { "type": "string" }, + "signingKeyID": { + "type": "integer" + }, "status": { "type": "string" } @@ -5905,29 +7083,44 @@ const docTemplate = `{ "service.BesuNodeDefaults": { "type": "object", "properties": { - "externalIP": { + "environmentVariables": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "externalIp": { "type": "string" }, - "internalIP": { + "internalIp": { "type": "string" }, "mode": { "$ref": "#/definitions/service.Mode" }, - "networkId": { + "p2pHost": { + "type": "string" + }, + "p2pPort": { "type": "integer" }, - "p2pAddress": { + "rpcHost": { "type": "string" }, - "rpcAddress": { - "type": "string" + "rpcPort": { + "type": "integer" } } }, "service.BesuNodeProperties": { "type": "object", "properties": { + "bootNodes": { + "type": "array", + "items": { + "type": "string" + } + }, "enodeUrl": { "type": "string" }, @@ -5958,6 +7151,43 @@ const docTemplate = `{ }, "rpcPort": { "type": "integer" + }, + "version": { + "type": "string" + } + } + }, + "service.Block": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "integer" + } + }, + "hash": { + "type": "string" + }, + "number": { + "type": "integer" + }, + "previous_hash": { + "type": "string" + }, 
+ "timestamp": { + "type": "string" + }, + "tx_count": { + "type": "integer" + } + } + }, + "service.CreateSettingParams": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/service.SettingConfig" } } }, @@ -6010,12 +7240,21 @@ const docTemplate = `{ }, "tlsKeyId": { "type": "integer" + }, + "version": { + "type": "string" } } }, "service.FabricPeerProperties": { "type": "object", "properties": { + "addressOverrides": { + "type": "array", + "items": { + "$ref": "#/definitions/types.AddressOverride" + } + }, "chaincodeAddress": { "type": "string" }, @@ -6065,6 +7304,9 @@ const docTemplate = `{ }, "tlsKeyId": { "type": "integer" + }, + "version": { + "type": "string" } } }, @@ -6120,6 +7362,9 @@ const docTemplate = `{ "endpoint": { "type": "string" }, + "errorMessage": { + "type": "string" + }, "id": { "type": "integer" }, @@ -6210,6 +7455,77 @@ const docTemplate = `{ } } }, + "service.Setting": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/service.SettingConfig" + }, + "created_at": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "updated_at": { + "type": "string" + } + } + }, + "service.SettingConfig": { + "type": "object", + "properties": { + "besuTemplateCMD": { + "type": "string" + }, + "ordererTemplateCMD": { + "type": "string" + }, + "peerTemplateCMD": { + "type": "string" + } + } + }, + "service.Transaction": { + "type": "object", + "properties": { + "block_number": { + "type": "integer" + }, + "creator": { + "type": "string" + }, + "payload": { + "type": "array", + "items": { + "type": "integer" + } + }, + "timestamp": { + "type": "string" + }, + "tx_id": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "types.AddressOverride": { + "type": "object", + "properties": { + "from": { + "type": "string" + }, + "tlsCACert": { + "type": "string" + }, + "to": { + "type": "string" + } + } + }, "types.BesuNodeConfig": { "type": "object", "required": [ @@ -6290,6 +7606,13 
@@ const docTemplate = `{ "organizationId" ], "properties": { + "addressOverrides": { + "description": "@Description Address overrides for the orderer", + "type": "array", + "items": { + "$ref": "#/definitions/types.AddressOverride" + } + }, "adminAddress": { "type": "string" }, @@ -6348,6 +7671,13 @@ const docTemplate = `{ "organizationId" ], "properties": { + "addressOverrides": { + "description": "@Description Address overrides for the peer", + "type": "array", + "items": { + "$ref": "#/definitions/types.AddressOverride" + } + }, "chaincodeAddress": { "description": "@Description Chaincode listen address", "type": "string", @@ -6402,6 +7732,13 @@ const docTemplate = `{ "type": "string", "example": "0.0.0.0:9443" }, + "ordererAddressOverrides": { + "description": "@Description Orderer address overrides for the peer", + "type": "array", + "items": { + "$ref": "#/definitions/types.OrdererAddressOverride" + } + }, "organizationId": { "description": "@Description Organization ID that owns this peer", "type": "integer", @@ -6427,6 +7764,7 @@ const docTemplate = `{ "STOPPED", "STOPPING", "STARTING", + "UPDATING", "ERROR" ], "x-enum-varnames": [ @@ -6435,6 +7773,7 @@ const docTemplate = `{ "NodeStatusStopped", "NodeStatusStopping", "NodeStatusStarting", + "NodeStatusUpdating", "NodeStatusError" ] }, @@ -6451,6 +7790,28 @@ const docTemplate = `{ "NodeTypeBesuFullnode" ] }, + "types.OrdererAddressOverride": { + "type": "object", + "required": [ + "from", + "tlsCACert", + "to" + ], + "properties": { + "from": { + "description": "@Description Original orderer address", + "type": "string" + }, + "tlsCACert": { + "description": "@Description TLS CA certificate in PEM format", + "type": "string" + }, + "to": { + "description": "@Description New orderer address to use", + "type": "string" + } + } + }, "url.URL": { "type": "object", "properties": { @@ -6505,6 +7866,66 @@ const docTemplate = `{ }, "url.Userinfo": { "type": "object" + }, + "x509.ExtKeyUsage": { + "type": 
"integer", + "enum": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13 + ], + "x-enum-varnames": [ + "ExtKeyUsageAny", + "ExtKeyUsageServerAuth", + "ExtKeyUsageClientAuth", + "ExtKeyUsageCodeSigning", + "ExtKeyUsageEmailProtection", + "ExtKeyUsageIPSECEndSystem", + "ExtKeyUsageIPSECTunnel", + "ExtKeyUsageIPSECUser", + "ExtKeyUsageTimeStamping", + "ExtKeyUsageOCSPSigning", + "ExtKeyUsageMicrosoftServerGatedCrypto", + "ExtKeyUsageNetscapeServerGatedCrypto", + "ExtKeyUsageMicrosoftCommercialCodeSigning", + "ExtKeyUsageMicrosoftKernelCodeSigning" + ] + }, + "x509.KeyUsage": { + "type": "integer", + "enum": [ + 1, + 2, + 4, + 8, + 16, + 32, + 64, + 128, + 256 + ], + "x-enum-varnames": [ + "KeyUsageDigitalSignature", + "KeyUsageContentCommitment", + "KeyUsageKeyEncipherment", + "KeyUsageDataEncipherment", + "KeyUsageKeyAgreement", + "KeyUsageCertSign", + "KeyUsageCRLSign", + "KeyUsageEncipherOnly", + "KeyUsageDecipherOnly" + ] } }, "securityDefinitions": { @@ -6526,10 +7947,6 @@ const docTemplate = `{ "description": "Key provider management operations", "name": "Providers" }, - { - "description": "Blockchain network management operations", - "name": "Networks" - }, { "description": "Network node management operations", "name": "Nodes" diff --git a/docs/swagger.json b/docs/swagger.json index 7fec2f3..e1bd4d8 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -10,8 +10,8 @@ "termsOfService": "http://swagger.io/terms/", "contact": { "name": "API Support", - "url": "http://www.chainlaunch.com/support", - "email": "support@chainlaunch.com" + "url": "http://chainlaunch.dev/support", + "email": "support@chainlaunch.dev" }, "license": { "name": "Apache 2.0", @@ -149,7 +149,7 @@ "application/json" ], "tags": [ - "backups" + "Backups" ], "summary": "List all backups", "responses": { @@ -179,7 +179,7 @@ "application/json" ], "tags": [ - "backups" + "Backups" ], "summary": "Create a new backup", "parameters": [ @@ -225,7 +225,7 @@ "application/json" 
], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "List all backup schedules", "responses": { @@ -255,7 +255,7 @@ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Create a new backup schedule", "parameters": [ @@ -301,7 +301,7 @@ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Get a backup schedule by ID", "parameters": [ @@ -349,7 +349,7 @@ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Update a backup schedule", "parameters": [ @@ -406,7 +406,7 @@ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Delete a backup schedule", "parameters": [ @@ -453,7 +453,7 @@ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Disable a backup schedule", "parameters": [ @@ -503,7 +503,7 @@ "application/json" ], "tags": [ - "backup-schedules" + "Backup Schedules" ], "summary": "Enable a backup schedule", "parameters": [ @@ -553,7 +553,7 @@ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "List all backup targets", "responses": { @@ -583,7 +583,7 @@ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "Create a new backup target", "parameters": [ @@ -629,7 +629,7 @@ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "Get a backup target by ID", "parameters": [ @@ -677,7 +677,7 @@ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "Update a backup target", "parameters": [ @@ -734,7 +734,7 @@ "application/json" ], "tags": [ - "backup-targets" + "Backup Targets" ], "summary": "Delete a backup target", "parameters": [ @@ -781,7 +781,7 @@ "application/json" ], "tags": [ - "backups" + "Backups" ], "summary": "Get a backup by ID", "parameters": [ @@ -829,7 +829,7 @@ "application/json" ], "tags": [ - "backups" + "Backups" ], "summary": "Delete a 
backup", "parameters": [ @@ -876,7 +876,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Submit config update proposal", "parameters": [ @@ -963,7 +963,7 @@ "application/json" ], "tags": [ - "providers" + "Providers" ], "summary": "List all key providers", "responses": { @@ -996,7 +996,7 @@ "application/json" ], "tags": [ - "providers" + "Providers" ], "summary": "Create a new key provider", "parameters": [ @@ -1048,7 +1048,7 @@ "application/json" ], "tags": [ - "providers" + "Providers" ], "summary": "Get a specific provider", "parameters": [ @@ -1105,7 +1105,7 @@ "application/json" ], "tags": [ - "providers" + "Providers" ], "summary": "Delete a provider", "parameters": [ @@ -1170,7 +1170,7 @@ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Get paginated keys", "parameters": [ @@ -1221,7 +1221,7 @@ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Create a new key", "parameters": [ @@ -1273,7 +1273,7 @@ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Get all keys", "responses": { @@ -1308,7 +1308,7 @@ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Filter keys by algorithm and curve", "parameters": [ @@ -1377,7 +1377,7 @@ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Get a specific key by ID", "parameters": [ @@ -1434,7 +1434,7 @@ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Delete a key", "parameters": [ @@ -1490,7 +1490,7 @@ "application/json" ], "tags": [ - "keys" + "Keys" ], "summary": "Sign a certificate", "parameters": [ @@ -1555,7 +1555,7 @@ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "List Besu networks", "parameters": [ @@ -1602,7 +1602,7 @@ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "Create a new Besu network", "parameters": [ @@ -1617,8 +1617,8 @@ } ], "responses": { - "201": { - "description": "Created", + "200": { + 
"description": "OK", "schema": { "$ref": "#/definitions/http.BesuNetworkResponse" } @@ -1648,7 +1648,7 @@ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "Import a Besu network", "parameters": [ @@ -1691,7 +1691,7 @@ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "Get a Besu network by ID", "parameters": [ @@ -1736,7 +1736,7 @@ "application/json" ], "tags": [ - "besu-networks" + "Besu Networks" ], "summary": "Delete a Besu network", "parameters": [ @@ -1780,7 +1780,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "List Fabric networks", "parameters": [ @@ -1827,7 +1827,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Create a new Fabric network", "parameters": [ @@ -1870,7 +1870,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get a Fabric network by slug", "parameters": [ @@ -1920,7 +1920,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Import a Fabric network", "parameters": [ @@ -1966,7 +1966,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Import a Fabric network with organization", "parameters": [ @@ -2009,7 +2009,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get a Fabric network by ID", "parameters": [ @@ -2054,7 +2054,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Delete a Fabric network", "parameters": [ @@ -2101,7 +2101,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Set anchor peers for an organization", "parameters": [ @@ -2144,6 +2144,125 @@ } } }, + "/networks/fabric/{id}/blocks": { + "get": { + "description": "Get a paginated list of blocks from a Fabric network", + "produces": [ + "application/json" + ], + "tags": [ + "Fabric Networks" + ], 
+ "summary": "Get list of blocks from Fabric network", + "parameters": [ + { + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "integer", + "description": "Number of blocks to return (default: 10)", + "name": "limit", + "in": "query" + }, + { + "type": "integer", + "description": "Number of blocks to skip (default: 0)", + "name": "offset", + "in": "query" + }, + { + "type": "boolean", + "description": "Get blocks in reverse order (default: false)", + "name": "reverse", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.BlockListResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, + "/networks/fabric/{id}/blocks/{blockNum}": { + "get": { + "description": "Get all transactions from a specific block in a Fabric network", + "produces": [ + "application/json" + ], + "tags": [ + "Fabric Networks" + ], + "summary": "Get transactions from a specific block", + "parameters": [ + { + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "integer", + "description": "Block Number", + "name": "blockNum", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.BlockTransactionsResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": 
"#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, "/networks/fabric/{id}/channel-config": { "get": { "description": "Retrieve the channel configuration for a Fabric network", @@ -2151,7 +2270,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get Fabric network channel configuration", "parameters": [ @@ -2192,7 +2311,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get Fabric network current channel configuration", "parameters": [ @@ -2226,6 +2345,56 @@ } } }, + "/networks/fabric/{id}/info": { + "get": { + "description": "Retrieve detailed information about the Fabric blockchain including height and block hashes", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Fabric Networks" + ], + "summary": "Get Fabric chain information", + "parameters": [ + { + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.ChainInfoResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + 
} + }, "/networks/fabric/{id}/nodes": { "get": { "description": "Get all nodes associated with a network", @@ -2233,7 +2402,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get network nodes", "parameters": [ @@ -2281,7 +2450,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Add node to network", "parameters": [ @@ -2334,7 +2503,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Remove orderer from Fabric network", "parameters": [ @@ -2385,7 +2554,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Join orderer to Fabric network", "parameters": [ @@ -2436,7 +2605,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Unjoin orderer from Fabric network", "parameters": [ @@ -2477,6 +2646,65 @@ } } }, + "/networks/fabric/{id}/organization-crl": { + "post": { + "description": "Update the Certificate Revocation List (CRL) for an organization in the network", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Fabric Networks" + ], + "summary": "Update organization CRL", + "parameters": [ + { + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Organization CRL update request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.UpdateOrganizationCRLRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.UpdateOrganizationCRLResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": 
"#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, "/networks/fabric/{id}/organizations/{orgId}/config": { "get": { "description": "Get the network configuration as YAML", @@ -2484,7 +2712,7 @@ "text/yaml" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Get network configuration", "parameters": [ @@ -2541,7 +2769,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Remove peer from Fabric network", "parameters": [ @@ -2592,7 +2820,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Join peer to Fabric network", "parameters": [ @@ -2643,7 +2871,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Unjoin peer from Fabric network", "parameters": [ @@ -2694,7 +2922,7 @@ "application/json" ], "tags": [ - "fabric-networks" + "Fabric Networks" ], "summary": "Reload network config block", "parameters": [ @@ -2746,64 +2974,63 @@ } } }, - "/nodes": { + "/networks/fabric/{id}/transactions/{txId}": { "get": { - "description": "Get a paginated list of nodes with optional platform filter", - "consumes": [ - "application/json" - ], + "description": "Get detailed information about a specific transaction in a Fabric network", "produces": [ "application/json" ], "tags": [ - "nodes" + "Fabric Networks" ], - "summary": "List all nodes", + "summary": "Get transaction details by transaction ID", "parameters": [ - { - "type": "string", - "description": "Filter by blockchain platform", - "name": "platform", - "in": "query" - }, { "type": "integer", - "default": 1, - "description": "Page number", - "name": "page", - "in": "query" + "description": "Network ID", + "name": "id", + "in": "path", + "required": true }, { - "type": "integer", 
- "default": 10, - "description": "Items per page", - "name": "limit", - "in": "query" + "type": "string", + "description": "Transaction ID", + "name": "txId", + "in": "path", + "required": true } ], "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/http.PaginatedNodesResponse" + "$ref": "#/definitions/http.TransactionResponse" } }, "400": { - "description": "Validation error", + "description": "Bad Request", "schema": { - "$ref": "#/definitions/response.ErrorResponse" + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" } }, "500": { - "description": "Internal server error", + "description": "Internal Server Error", "schema": { - "$ref": "#/definitions/response.ErrorResponse" + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" } } } - }, + } + }, + "/networks/fabric/{id}/update-config": { "post": { - "description": "Create a new node with the specified configuration", + "description": "Prepare a config update proposal for a Fabric network using the provided operations.\nThe following operation types are supported:\n- add_org: Add a new organization to the channel\n- remove_org: Remove an organization from the channel\n- update_org_msp: Update an organization's MSP configuration\n- set_anchor_peers: Set anchor peers for an organization\n- add_consenter: Add a new consenter to the orderer\n- remove_consenter: Remove a consenter from the orderer\n- update_consenter: Update a consenter in the orderer\n- update_etcd_raft_options: Update etcd raft options for the orderer\n- update_batch_size: Update batch size for the orderer\n- update_batch_timeout: Update batch timeout for the orderer", "consumes": [ "application/json" ], @@ -2811,15 +3038,123 @@ "application/json" ], "tags": [ - "nodes" + "Fabric 
Networks" ], - "summary": "Create a new node", + "summary": "Prepare a config update for a Fabric network", "parameters": [ { - "description": "Node creation request", - "name": "request", - "in": "body", - "required": true, + "type": "integer", + "description": "Network ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Config update operations", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.UpdateFabricNetworkRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.ConfigUpdateResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse" + } + } + } + } + }, + "/nodes": { + "get": { + "description": "Get a paginated list of nodes with optional platform filter", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Nodes" + ], + "summary": "List all nodes", + "parameters": [ + { + "type": "string", + "description": "Filter by blockchain platform", + "name": "platform", + "in": "query" + }, + { + "type": "integer", + "default": 1, + "description": "Page number", + "name": "page", + "in": "query" + }, + { + "type": "integer", + "default": 10, + "description": "Items per page", + "name": "limit", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.PaginatedNodesResponse" + } + }, + "400": { + "description": "Validation error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "500": { + "description": "Internal server error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + } + } + }, + "post": { + 
"description": "Create a new node with the specified configuration", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Nodes" + ], + "summary": "Create a new node", + "parameters": [ + { + "description": "Node creation request", + "name": "request", + "in": "body", + "required": true, "schema": { "$ref": "#/definitions/http.CreateNodeRequest" } @@ -2854,14 +3189,27 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get default values for Besu node", + "parameters": [ + { + "minimum": 0, + "type": "integer", + "default": 1, + "description": "Number of Besu nodes", + "name": "besuNodes", + "in": "query" + } + ], "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/service.BesuNodeDefaults" + "type": "array", + "items": { + "$ref": "#/definitions/http.BesuNodeDefaultsResponse" + } } }, "500": { @@ -2880,7 +3228,7 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get default values for multiple Fabric nodes", "parameters": [ @@ -2941,7 +3289,7 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get default values for Fabric orderer node", "responses": { @@ -2967,7 +3315,7 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get default values for Fabric peer node", "responses": { @@ -2996,7 +3344,7 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "List nodes by platform", "parameters": [ @@ -3058,7 +3406,7 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], "summary": "Get a node", "parameters": [ @@ -3097,8 +3445,8 @@ } } }, - "delete": { - "description": "Delete a node by ID", + "put": { + "description": "Updates an existing node's configuration based on its type", "consumes": [ "application/json" ], @@ -3106,9 +3454,9 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Delete a node", + "summary": "Update a node", "parameters": [ { "type": "integer", @@ -3116,11 +3464,23 
@@ "name": "id", "in": "path", "required": true + }, + { + "description": "Update node request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/http.UpdateNodeRequest" + } } ], "responses": { - "204": { - "description": "No Content" + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.NodeResponse" + } }, "400": { "description": "Validation error", @@ -3141,11 +3501,9 @@ } } } - } - }, - "/nodes/{id}/events": { - "get": { - "description": "Get a paginated list of events for a specific node", + }, + "delete": { + "description": "Delete a node by ID", "consumes": [ "application/json" ], @@ -3153,9 +3511,9 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Get node events", + "summary": "Delete a node", "parameters": [ { "type": "integer", @@ -3163,28 +3521,11 @@ "name": "id", "in": "path", "required": true - }, - { - "type": "integer", - "default": 1, - "description": "Page number", - "name": "page", - "in": "query" - }, - { - "type": "integer", - "default": 10, - "description": "Items per page", - "name": "limit", - "in": "query" } ], "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/http.PaginatedNodeEventsResponse" - } + "204": { + "description": "No Content" }, "400": { "description": "Validation error", @@ -3207,19 +3548,19 @@ } } }, - "/nodes/{id}/logs": { - "get": { - "description": "Stream logs from a specific node", + "/nodes/{id}/certificates/renew": { + "post": { + "description": "Renews the TLS and signing certificates for a Fabric node", "consumes": [ "application/json" ], "produces": [ - "text/event-stream" + "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Tail node logs", + "summary": "Renew node certificates", "parameters": [ { "type": "integer", @@ -3227,27 +3568,13 @@ "name": "id", "in": "path", "required": true - }, - { - "type": "boolean", - "default": false, - "description": "Follow logs", - 
"name": "follow", - "in": "query" - }, - { - "type": "integer", - "default": 100, - "description": "Number of lines to show from the end", - "name": "tail", - "in": "query" } ], "responses": { "200": { - "description": "Log stream", + "description": "OK", "schema": { - "type": "string" + "$ref": "#/definitions/http.NodeResponse" } }, "400": { @@ -3271,9 +3598,9 @@ } } }, - "/nodes/{id}/restart": { - "post": { - "description": "Restart a node by ID (stops and starts the node)", + "/nodes/{id}/channels": { + "get": { + "description": "Retrieves all channels for a specific Fabric node", "consumes": [ "application/json" ], @@ -3281,9 +3608,9 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Restart a node", + "summary": "Get channels for a Fabric node", "parameters": [ { "type": "integer", @@ -3297,7 +3624,7 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/http.NodeResponse" + "$ref": "#/definitions/http.NodeChannelsResponse" } }, "400": { @@ -3321,9 +3648,9 @@ } } }, - "/nodes/{id}/start": { - "post": { - "description": "Start a node by ID", + "/nodes/{id}/events": { + "get": { + "description": "Get a paginated list of events for a specific node", "consumes": [ "application/json" ], @@ -3331,9 +3658,9 @@ "application/json" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Start a node", + "summary": "Get node events", "parameters": [ { "type": "integer", @@ -3341,13 +3668,27 @@ "name": "id", "in": "path", "required": true + }, + { + "type": "integer", + "default": 1, + "description": "Page number", + "name": "page", + "in": "query" + }, + { + "type": "integer", + "default": 10, + "description": "Items per page", + "name": "limit", + "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/http.NodeResponse" + "$ref": "#/definitions/http.PaginatedNodeEventsResponse" } }, "400": { @@ -3371,19 +3712,19 @@ } } }, - "/nodes/{id}/stop": { - "post": { - "description": "Stop a node by ID", 
+ "/nodes/{id}/logs": { + "get": { + "description": "Stream logs from a specific node", "consumes": [ "application/json" ], "produces": [ - "application/json" + "text/event-stream" ], "tags": [ - "nodes" + "Nodes" ], - "summary": "Stop a node", + "summary": "Tail node logs", "parameters": [ { "type": "integer", @@ -3391,13 +3732,27 @@ "name": "id", "in": "path", "required": true + }, + { + "type": "boolean", + "default": false, + "description": "Follow logs", + "name": "follow", + "in": "query" + }, + { + "type": "integer", + "default": 100, + "description": "Number of lines to show from the end", + "name": "tail", + "in": "query" } ], "responses": { "200": { - "description": "OK", + "description": "Log stream", "schema": { - "$ref": "#/definitions/http.NodeResponse" + "type": "string" } }, "400": { @@ -3421,9 +3776,9 @@ } } }, - "/notifications/providers": { - "get": { - "description": "Get a list of all notification providers", + "/nodes/{id}/restart": { + "post": { + "description": "Restart a node by ID (stops and starts the node)", "consumes": [ "application/json" ], @@ -3431,7 +3786,157 @@ "application/json" ], "tags": [ - "notifications" + "Nodes" + ], + "summary": "Restart a node", + "parameters": [ + { + "type": "integer", + "description": "Node ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.NodeResponse" + } + }, + "400": { + "description": "Validation error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "404": { + "description": "Node not found", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "500": { + "description": "Internal server error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + } + } + } + }, + "/nodes/{id}/start": { + "post": { + "description": "Start a node by ID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + 
"tags": [ + "Nodes" + ], + "summary": "Start a node", + "parameters": [ + { + "type": "integer", + "description": "Node ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.NodeResponse" + } + }, + "400": { + "description": "Validation error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "404": { + "description": "Node not found", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "500": { + "description": "Internal server error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + } + } + } + }, + "/nodes/{id}/stop": { + "post": { + "description": "Stop a node by ID", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Nodes" + ], + "summary": "Stop a node", + "parameters": [ + { + "type": "integer", + "description": "Node ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/http.NodeResponse" + } + }, + "400": { + "description": "Validation error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "404": { + "description": "Node not found", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + }, + "500": { + "description": "Internal server error", + "schema": { + "$ref": "#/definitions/response.ErrorResponse" + } + } + } + } + }, + "/notifications/providers": { + "get": { + "description": "Get a list of all notification providers", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifications" ], "summary": "List notification providers", "responses": { @@ -3461,7 +3966,7 @@ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Create a notification provider", "parameters": [ @@ -3507,7 +4012,7 @@ "application/json" ], "tags": 
[ - "notifications" + "Notifications" ], "summary": "Get a notification provider", "parameters": [ @@ -3555,7 +4060,7 @@ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Update a notification provider", "parameters": [ @@ -3612,7 +4117,7 @@ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Delete a notification provider", "parameters": [ @@ -3659,7 +4164,7 @@ "application/json" ], "tags": [ - "notifications" + "Notifications" ], "summary": "Test a notification provider", "parameters": [ @@ -3712,7 +4217,7 @@ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "List all Fabric organizations", "responses": { @@ -3745,7 +4250,7 @@ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Create a new Fabric organization", "parameters": [ @@ -3797,7 +4302,7 @@ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Get a Fabric organization by MSP ID", "parameters": [ @@ -3847,7 +4352,7 @@ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Get a Fabric organization", "parameters": [ @@ -3895,7 +4400,7 @@ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Update a Fabric organization", "parameters": [ @@ -3961,7 +4466,7 @@ "application/json" ], "tags": [ - "organizations" + "Organizations" ], "summary": "Delete a Fabric organization", "parameters": [ @@ -3997,117 +4502,405 @@ } } } - } - }, - "definitions": { - "auth.LoginRequest": { - "type": "object", - "properties": { - "password": { - "type": "string" - }, - "username": { - "type": "string" - } - } }, - "auth.LoginResponse": { - "description": "Login response", - "type": "object", - "properties": { - "message": { - "description": "Success message\n@Example \"Login successful\"", - "type": "string" - } - } - }, - "auth.LogoutResponse": { - "description": "Logout response", - "type": "object", - "properties": 
{ - "message": { - "description": "Success message\n@Example \"Logout successful\"", - "type": "string" - } - } - }, - "auth.UserResponse": { - "description": "User information response", - "type": "object", - "properties": { - "created_at": { - "description": "Time when the user was created\n@Example \"2024-01-01T00:00:00Z\"", - "type": "string" - }, - "last_login_at": { - "description": "Last time the user logged in\n@Example \"2024-01-01T12:34:56Z\"", - "type": "string" - }, - "username": { - "description": "Username of the user\n@Example \"admin\"", - "type": "string" + "/organizations/{id}/crl": { + "get": { + "description": "Get the current Certificate Revocation List for the organization", + "consumes": [ + "application/json" + ], + "produces": [ + "application/x-pem-file" + ], + "tags": [ + "Organizations" + ], + "summary": "Get organization's CRL", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "PEM encoded CRL", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } } } }, - "crypto_x509.ExtKeyUsage": { - "type": "integer", - "enum": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13 - ], - "x-enum-varnames": [ - "ExtKeyUsageAny", - "ExtKeyUsageServerAuth", - "ExtKeyUsageClientAuth", - "ExtKeyUsageCodeSigning", - "ExtKeyUsageEmailProtection", - "ExtKeyUsageIPSECEndSystem", - "ExtKeyUsageIPSECTunnel", - "ExtKeyUsageIPSECUser", - "ExtKeyUsageTimeStamping", - "ExtKeyUsageOCSPSigning", - "ExtKeyUsageMicrosoftServerGatedCrypto", - "ExtKeyUsageNetscapeServerGatedCrypto", - "ExtKeyUsageMicrosoftCommercialCodeSigning", - 
"ExtKeyUsageMicrosoftKernelCodeSigning" - ] + "/organizations/{id}/crl/revoke/pem": { + "post": { + "description": "Add a certificate to the organization's CRL using its PEM encoded data", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Revoke a certificate using PEM data", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Certificate revocation request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/handler.RevokeCertificateByPEMRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } }, - "crypto_x509.KeyUsage": { - "type": "integer", - "enum": [ - 1, - 2, - 4, - 8, - 16, - 32, - 64, - 128, - 256 - ], - "x-enum-varnames": [ - "KeyUsageDigitalSignature", - "KeyUsageContentCommitment", - "KeyUsageKeyEncipherment", - "KeyUsageDataEncipherment", - "KeyUsageKeyAgreement", - "KeyUsageCertSign", - "KeyUsageCRLSign", - "KeyUsageEncipherOnly", - "KeyUsageDecipherOnly" - ] + "/organizations/{id}/crl/revoke/serial": { + "post": { + "description": "Add a certificate to the organization's CRL using its serial number", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Revoke a certificate using its serial number", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + }, + { + 
"description": "Certificate revocation request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/handler.RevokeCertificateBySerialRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + }, + "delete": { + "description": "Remove a certificate from the organization's CRL using its serial number", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Delete a revoked certificate using its serial number", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Certificate deletion request", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/handler.DeleteRevokedCertificateRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/organizations/{id}/revoked-certificates": { + "get": { + "description": "Get all revoked certificates for the organization", + "consumes": [ + 
"application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Get organization's revoked certificates", + "parameters": [ + { + "type": "integer", + "description": "Organization ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/handler.RevokedCertificateResponse" + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/settings": { + "get": { + "description": "Get the default setting's details", + "produces": [ + "application/json" + ], + "tags": [ + "Settings" + ], + "summary": "Get the default setting", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/service.Setting" + } + } + } + }, + "post": { + "description": "Create or update the default setting with the provided configuration", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Settings" + ], + "summary": "Create or update the default setting", + "parameters": [ + { + "description": "Setting configuration", + "name": "setting", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/service.CreateSettingParams" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/service.Setting" + } + } + } + } + } + }, + "definitions": { + "auth.LoginRequest": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "auth.LoginResponse": { + "description": "Login response", + "type": "object", + "properties": { + "message": { + "description": "Success 
message\n@Example \"Login successful\"", + "type": "string" + } + } + }, + "auth.LogoutResponse": { + "description": "Logout response", + "type": "object", + "properties": { + "message": { + "description": "Success message\n@Example \"Logout successful\"", + "type": "string" + } + } + }, + "auth.UserResponse": { + "description": "User information response", + "type": "object", + "properties": { + "created_at": { + "description": "Time when the user was created\n@Example \"2024-01-01T00:00:00Z\"", + "type": "string" + }, + "last_login_at": { + "description": "Last time the user logged in\n@Example \"2024-01-01T12:34:56Z\"", + "type": "string" + }, + "username": { + "description": "Username of the user\n@Example \"admin\"", + "type": "string" + } + } }, "github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse": { "type": "object", @@ -4152,6 +4945,15 @@ } } }, + "handler.DeleteRevokedCertificateRequest": { + "type": "object", + "properties": { + "serialNumber": { + "description": "Hex string of the serial number", + "type": "string" + } + } + }, "handler.OrganizationResponse": { "type": "object", "properties": { @@ -4190,6 +4992,44 @@ } } }, + "handler.RevokeCertificateByPEMRequest": { + "type": "object", + "properties": { + "certificate": { + "description": "PEM encoded certificate", + "type": "string" + }, + "revocationReason": { + "type": "integer" + } + } + }, + "handler.RevokeCertificateBySerialRequest": { + "type": "object", + "properties": { + "revocationReason": { + "type": "integer" + }, + "serialNumber": { + "description": "Hex string of the serial number", + "type": "string" + } + } + }, + "handler.RevokedCertificateResponse": { + "type": "object", + "properties": { + "reason": { + "type": "integer" + }, + "revocationTime": { + "type": "string" + }, + "serialNumber": { + "type": "string" + } + } + }, "handler.UpdateOrganizationRequest": { "type": "object", "properties": { @@ -4350,98 +5190,230 @@ "targetId": { "type": "integer" }, - "updatedAt": 
{ + "updatedAt": { + "type": "string" + } + } + }, + "http.BackupTargetResponse": { + "type": "object", + "properties": { + "accessKeyId": { + "type": "string" + }, + "bucketName": { + "type": "string" + }, + "bucketPath": { + "type": "string" + }, + "createdAt": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "forcePathStyle": { + "type": "boolean" + }, + "id": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "region": { + "type": "string" + }, + "type": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "http.BesuNetworkResponse": { + "type": "object", + "properties": { + "chainId": { + "type": "integer" + }, + "config": { + "type": "array", + "items": { + "type": "integer" + } + }, + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "genesisConfig": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "platform": { + "type": "string" + }, + "status": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "http.BesuNodeDefaultsResponse": { + "type": "object", + "properties": { + "defaults": { + "type": "array", + "items": { + "$ref": "#/definitions/service.BesuNodeDefaults" + } + }, + "nodeCount": { + "type": "integer" + } + } + }, + "http.BlockListResponse": { + "type": "object", + "properties": { + "blocks": { + "type": "array", + "items": { + "$ref": "#/definitions/service.Block" + } + }, + "total": { + "type": "integer" + } + } + }, + "http.BlockTransactionsResponse": { + "type": "object", + "properties": { + "block": { + "$ref": "#/definitions/service.Block" + }, + "transactions": { + "type": "array", + "items": { + "$ref": "#/definitions/service.Transaction" + } + } + } + }, + "http.ChainInfoResponse": { + "type": "object", + "properties": { + "currentBlockHash": { + "type": "string" + }, + "height": { + "type": "integer" + }, + "previousBlockHash": { "type": "string" } } }, - 
"http.BackupTargetResponse": { + "http.ChannelConfigResponse": { "type": "object", "properties": { - "accessKeyId": { - "type": "string" - }, - "bucketName": { - "type": "string" + "config": { + "type": "object", + "additionalProperties": true }, - "bucketPath": { + "name": { "type": "string" + } + } + }, + "http.ChannelResponse": { + "type": "object", + "properties": { + "blockNum": { + "type": "integer" }, "createdAt": { "type": "string" }, - "endpoint": { - "type": "string" - }, - "forcePathStyle": { - "type": "boolean" - }, - "id": { - "type": "integer" - }, "name": { "type": "string" - }, - "region": { - "type": "string" - }, - "type": { - "type": "string" - }, - "updatedAt": { - "type": "string" } } }, - "http.BesuNetworkResponse": { + "http.ConfigUpdateOperationRequest": { + "description": "A single configuration update operation", "type": "object", + "required": [ + "payload", + "type" + ], "properties": { - "chainId": { - "type": "integer" - }, - "config": { + "payload": { + "description": "Payload contains the operation-specific data\nThe structure depends on the operation type:\n- add_org: AddOrgPayload\n- remove_org: RemoveOrgPayload\n- update_org_msp: UpdateOrgMSPPayload\n- set_anchor_peers: SetAnchorPeersPayload\n- add_consenter: AddConsenterPayload\n- remove_consenter: RemoveConsenterPayload\n- update_consenter: UpdateConsenterPayload\n- update_etcd_raft_options: UpdateEtcdRaftOptionsPayload\n- update_batch_size: UpdateBatchSizePayload\n- update_batch_timeout: UpdateBatchTimeoutPayload\n@Description The payload for the configuration update operation\n@Description Can be one of:\n@Description - AddOrgPayload when type is \"add_org\"\n@Description - RemoveOrgPayload when type is \"remove_org\"\n@Description - UpdateOrgMSPPayload when type is \"update_org_msp\"\n@Description - SetAnchorPeersPayload when type is \"set_anchor_peers\"\n@Description - AddConsenterPayload when type is \"add_consenter\"\n@Description - RemoveConsenterPayload when type is 
\"remove_consenter\"\n@Description - UpdateConsenterPayload when type is \"update_consenter\"\n@Description - UpdateEtcdRaftOptionsPayload when type is \"update_etcd_raft_options\"\n@Description - UpdateBatchSizePayload when type is \"update_batch_size\"\n@Description - UpdateBatchTimeoutPayload when type is \"update_batch_timeout\"", "type": "array", "items": { "type": "integer" } }, - "createdAt": { + "type": { + "description": "Type is the type of configuration update operation\nenum: add_org,remove_org,update_org_msp,set_anchor_peers,add_consenter,remove_consenter,update_consenter,update_etcd_raft_options,update_batch_size,update_batch_timeout", + "type": "string", + "enum": [ + "add_org", + "remove_org", + "update_org_msp", + "set_anchor_peers", + "add_consenter", + "remove_consenter", + "update_consenter", + "update_etcd_raft_options", + "update_batch_size", + "update_batch_timeout" + ] + } + } + }, + "http.ConfigUpdateResponse": { + "type": "object", + "properties": { + "channel_name": { "type": "string" }, - "description": { + "created_at": { "type": "string" }, - "genesisConfig": { - "type": "array", - "items": { - "type": "integer" - } + "created_by": { + "type": "string" }, "id": { - "type": "integer" - }, - "name": { "type": "string" }, - "platform": { - "type": "string" + "network_id": { + "type": "integer" }, - "status": { - "type": "string" + "operations": { + "type": "array", + "items": { + "$ref": "#/definitions/http.ConfigUpdateOperationRequest" + } }, - "updatedAt": { + "preview_json": { "type": "string" - } - } - }, - "http.ChannelConfigResponse": { - "type": "object", - "properties": { - "config": { - "type": "object", - "additionalProperties": true }, - "name": { + "status": { "type": "string" } } @@ -4971,6 +5943,20 @@ } } }, + "http.NodeChannelsResponse": { + "type": "object", + "properties": { + "channels": { + "type": "array", + "items": { + "$ref": "#/definitions/http.ChannelResponse" + } + }, + "nodeId": { + "type": "integer" + } + } + 
}, "http.NodeEventResponse": { "type": "object", "properties": { @@ -5001,6 +5987,9 @@ "endpoint": { "type": "string" }, + "errorMessage": { + "type": "string" + }, "fabricOrderer": { "$ref": "#/definitions/service.FabricOrdererProperties" }, @@ -5244,6 +6233,14 @@ } } }, + "http.TransactionResponse": { + "type": "object", + "properties": { + "transaction": { + "$ref": "#/definitions/service.Transaction" + } + } + }, "http.UpdateBackupScheduleRequest": { "type": "object", "required": [ @@ -5348,6 +6345,51 @@ } } }, + "http.UpdateBesuNodeRequest": { + "type": "object", + "required": [ + "networkId", + "p2pHost", + "p2pPort", + "rpcHost", + "rpcPort" + ], + "properties": { + "bootnodes": { + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "externalIp": { + "type": "string" + }, + "internalIp": { + "type": "string" + }, + "networkId": { + "type": "integer" + }, + "p2pHost": { + "type": "string" + }, + "p2pPort": { + "type": "integer" + }, + "rpcHost": { + "type": "string" + }, + "rpcPort": { + "type": "integer" + } + } + }, "http.UpdateConsenterPayload": { "type": "object", "required": [ @@ -5378,38 +6420,152 @@ "maximum": 65535, "minimum": 1 }, - "server_tls_cert": { + "server_tls_cert": { + "type": "string" + } + } + }, + "http.UpdateEtcdRaftOptionsPayload": { + "type": "object", + "required": [ + "election_tick", + "heartbeat_tick", + "max_inflight_blocks", + "snapshot_interval_size", + "tick_interval" + ], + "properties": { + "election_tick": { + "type": "integer", + "minimum": 1 + }, + "heartbeat_tick": { + "type": "integer", + "minimum": 1 + }, + "max_inflight_blocks": { + "type": "integer", + "minimum": 1 + }, + "snapshot_interval_size": { + "type": "integer", + "minimum": 1 + }, + "tick_interval": { + "type": "string" + } + } + }, + "http.UpdateFabricNetworkRequest": { + "type": "object", + "required": [ + "operations" + ], + "properties": { + "operations": { + 
"type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/http.ConfigUpdateOperationRequest" + } + } + } + }, + "http.UpdateFabricOrdererRequest": { + "type": "object", + "properties": { + "adminAddress": { + "type": "string" + }, + "domainNames": { + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "externalEndpoint": { + "type": "string" + }, + "listenAddress": { + "type": "string" + }, + "operationsListenAddress": { + "type": "string" + }, + "version": { + "type": "string" + } + } + }, + "http.UpdateFabricPeerRequest": { + "type": "object", + "properties": { + "addressOverrides": { + "type": "array", + "items": { + "$ref": "#/definitions/types.AddressOverride" + } + }, + "chaincodeAddress": { + "type": "string" + }, + "domainNames": { + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "eventsAddress": { + "type": "string" + }, + "externalEndpoint": { + "type": "string" + }, + "listenAddress": { + "type": "string" + }, + "operationsListenAddress": { + "type": "string" + }, + "version": { "type": "string" } } }, - "http.UpdateEtcdRaftOptionsPayload": { + "http.UpdateNodeRequest": { "type": "object", - "required": [ - "election_tick", - "heartbeat_tick", - "max_inflight_blocks", - "snapshot_interval_size", - "tick_interval" - ], "properties": { - "election_tick": { - "type": "integer", - "minimum": 1 + "besuNode": { + "$ref": "#/definitions/http.UpdateBesuNodeRequest" }, - "heartbeat_tick": { - "type": "integer", - "minimum": 1 + "blockchainPlatform": { + "$ref": "#/definitions/types.BlockchainPlatform" }, - "max_inflight_blocks": { - "type": "integer", - "minimum": 1 + "fabricOrderer": { + "$ref": "#/definitions/http.UpdateFabricOrdererRequest" }, - "snapshot_interval_size": { - "type": "integer", - "minimum": 1 + "fabricPeer": { + "description": 
"Platform-specific configurations", + "allOf": [ + { + "$ref": "#/definitions/http.UpdateFabricPeerRequest" + } + ] }, - "tick_interval": { + "name": { + "description": "Common fields", "type": "string" } } @@ -5441,6 +6597,25 @@ } } }, + "http.UpdateOrganizationCRLRequest": { + "type": "object", + "required": [ + "organizationId" + ], + "properties": { + "organizationId": { + "type": "integer" + } + } + }, + "http.UpdateOrganizationCRLResponse": { + "type": "object", + "properties": { + "transactionId": { + "type": "string" + } + } + }, "http.UpdateProviderRequest": { "type": "object", "required": [ @@ -5513,7 +6688,7 @@ "extKeyUsage": { "type": "array", "items": { - "$ref": "#/definitions/crypto_x509.ExtKeyUsage" + "$ref": "#/definitions/x509.ExtKeyUsage" } }, "ipAddresses": { @@ -5529,7 +6704,7 @@ "type": "boolean" }, "keyUsage": { - "$ref": "#/definitions/crypto_x509.KeyUsage" + "$ref": "#/definitions/x509.KeyUsage" }, "locality": { "type": "array", @@ -5813,6 +6988,9 @@ "sha256Fingerprint": { "type": "string" }, + "signingKeyID": { + "type": "integer" + }, "status": { "type": "string" } @@ -5903,29 +7081,44 @@ "service.BesuNodeDefaults": { "type": "object", "properties": { - "externalIP": { + "environmentVariables": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "externalIp": { "type": "string" }, - "internalIP": { + "internalIp": { "type": "string" }, "mode": { "$ref": "#/definitions/service.Mode" }, - "networkId": { + "p2pHost": { + "type": "string" + }, + "p2pPort": { "type": "integer" }, - "p2pAddress": { + "rpcHost": { "type": "string" }, - "rpcAddress": { - "type": "string" + "rpcPort": { + "type": "integer" } } }, "service.BesuNodeProperties": { "type": "object", "properties": { + "bootNodes": { + "type": "array", + "items": { + "type": "string" + } + }, "enodeUrl": { "type": "string" }, @@ -5956,6 +7149,43 @@ }, "rpcPort": { "type": "integer" + }, + "version": { + "type": "string" + } + } + }, + "service.Block": { + 
"type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "integer" + } + }, + "hash": { + "type": "string" + }, + "number": { + "type": "integer" + }, + "previous_hash": { + "type": "string" + }, + "timestamp": { + "type": "string" + }, + "tx_count": { + "type": "integer" + } + } + }, + "service.CreateSettingParams": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/service.SettingConfig" } } }, @@ -6008,12 +7238,21 @@ }, "tlsKeyId": { "type": "integer" + }, + "version": { + "type": "string" } } }, "service.FabricPeerProperties": { "type": "object", "properties": { + "addressOverrides": { + "type": "array", + "items": { + "$ref": "#/definitions/types.AddressOverride" + } + }, "chaincodeAddress": { "type": "string" }, @@ -6063,6 +7302,9 @@ }, "tlsKeyId": { "type": "integer" + }, + "version": { + "type": "string" } } }, @@ -6118,6 +7360,9 @@ "endpoint": { "type": "string" }, + "errorMessage": { + "type": "string" + }, "id": { "type": "integer" }, @@ -6208,6 +7453,77 @@ } } }, + "service.Setting": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/service.SettingConfig" + }, + "created_at": { + "type": "string" + }, + "id": { + "type": "integer" + }, + "updated_at": { + "type": "string" + } + } + }, + "service.SettingConfig": { + "type": "object", + "properties": { + "besuTemplateCMD": { + "type": "string" + }, + "ordererTemplateCMD": { + "type": "string" + }, + "peerTemplateCMD": { + "type": "string" + } + } + }, + "service.Transaction": { + "type": "object", + "properties": { + "block_number": { + "type": "integer" + }, + "creator": { + "type": "string" + }, + "payload": { + "type": "array", + "items": { + "type": "integer" + } + }, + "timestamp": { + "type": "string" + }, + "tx_id": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "types.AddressOverride": { + "type": "object", + "properties": { + "from": { + "type": "string" + }, + "tlsCACert": { + 
"type": "string" + }, + "to": { + "type": "string" + } + } + }, "types.BesuNodeConfig": { "type": "object", "required": [ @@ -6288,6 +7604,13 @@ "organizationId" ], "properties": { + "addressOverrides": { + "description": "@Description Address overrides for the orderer", + "type": "array", + "items": { + "$ref": "#/definitions/types.AddressOverride" + } + }, "adminAddress": { "type": "string" }, @@ -6346,6 +7669,13 @@ "organizationId" ], "properties": { + "addressOverrides": { + "description": "@Description Address overrides for the peer", + "type": "array", + "items": { + "$ref": "#/definitions/types.AddressOverride" + } + }, "chaincodeAddress": { "description": "@Description Chaincode listen address", "type": "string", @@ -6400,6 +7730,13 @@ "type": "string", "example": "0.0.0.0:9443" }, + "ordererAddressOverrides": { + "description": "@Description Orderer address overrides for the peer", + "type": "array", + "items": { + "$ref": "#/definitions/types.OrdererAddressOverride" + } + }, "organizationId": { "description": "@Description Organization ID that owns this peer", "type": "integer", @@ -6425,6 +7762,7 @@ "STOPPED", "STOPPING", "STARTING", + "UPDATING", "ERROR" ], "x-enum-varnames": [ @@ -6433,6 +7771,7 @@ "NodeStatusStopped", "NodeStatusStopping", "NodeStatusStarting", + "NodeStatusUpdating", "NodeStatusError" ] }, @@ -6449,6 +7788,28 @@ "NodeTypeBesuFullnode" ] }, + "types.OrdererAddressOverride": { + "type": "object", + "required": [ + "from", + "tlsCACert", + "to" + ], + "properties": { + "from": { + "description": "@Description Original orderer address", + "type": "string" + }, + "tlsCACert": { + "description": "@Description TLS CA certificate in PEM format", + "type": "string" + }, + "to": { + "description": "@Description New orderer address to use", + "type": "string" + } + } + }, "url.URL": { "type": "object", "properties": { @@ -6503,6 +7864,66 @@ }, "url.Userinfo": { "type": "object" + }, + "x509.ExtKeyUsage": { + "type": "integer", + "enum": [ + 0, 
+ 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13 + ], + "x-enum-varnames": [ + "ExtKeyUsageAny", + "ExtKeyUsageServerAuth", + "ExtKeyUsageClientAuth", + "ExtKeyUsageCodeSigning", + "ExtKeyUsageEmailProtection", + "ExtKeyUsageIPSECEndSystem", + "ExtKeyUsageIPSECTunnel", + "ExtKeyUsageIPSECUser", + "ExtKeyUsageTimeStamping", + "ExtKeyUsageOCSPSigning", + "ExtKeyUsageMicrosoftServerGatedCrypto", + "ExtKeyUsageNetscapeServerGatedCrypto", + "ExtKeyUsageMicrosoftCommercialCodeSigning", + "ExtKeyUsageMicrosoftKernelCodeSigning" + ] + }, + "x509.KeyUsage": { + "type": "integer", + "enum": [ + 1, + 2, + 4, + 8, + 16, + 32, + 64, + 128, + 256 + ], + "x-enum-varnames": [ + "KeyUsageDigitalSignature", + "KeyUsageContentCommitment", + "KeyUsageKeyEncipherment", + "KeyUsageDataEncipherment", + "KeyUsageKeyAgreement", + "KeyUsageCertSign", + "KeyUsageCRLSign", + "KeyUsageEncipherOnly", + "KeyUsageDecipherOnly" + ] } }, "securityDefinitions": { @@ -6524,10 +7945,6 @@ "description": "Key provider management operations", "name": "Providers" }, - { - "description": "Blockchain network management operations", - "name": "Networks" - }, { "description": "Network node management operations", "name": "Nodes" diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 65454f0..53fb2b9 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -44,60 +44,6 @@ definitions: @Example "admin" type: string type: object - crypto_x509.ExtKeyUsage: - enum: - - 0 - - 1 - - 2 - - 3 - - 4 - - 5 - - 6 - - 7 - - 8 - - 9 - - 10 - - 11 - - 12 - - 13 - type: integer - x-enum-varnames: - - ExtKeyUsageAny - - ExtKeyUsageServerAuth - - ExtKeyUsageClientAuth - - ExtKeyUsageCodeSigning - - ExtKeyUsageEmailProtection - - ExtKeyUsageIPSECEndSystem - - ExtKeyUsageIPSECTunnel - - ExtKeyUsageIPSECUser - - ExtKeyUsageTimeStamping - - ExtKeyUsageOCSPSigning - - ExtKeyUsageMicrosoftServerGatedCrypto - - ExtKeyUsageNetscapeServerGatedCrypto - - ExtKeyUsageMicrosoftCommercialCodeSigning - - 
ExtKeyUsageMicrosoftKernelCodeSigning - crypto_x509.KeyUsage: - enum: - - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - type: integer - x-enum-varnames: - - KeyUsageDigitalSignature - - KeyUsageContentCommitment - - KeyUsageKeyEncipherment - - KeyUsageDataEncipherment - - KeyUsageKeyAgreement - - KeyUsageCertSign - - KeyUsageCRLSign - - KeyUsageEncipherOnly - - KeyUsageDecipherOnly github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse: properties: code: @@ -126,6 +72,12 @@ definitions: - mspId - name type: object + handler.DeleteRevokedCertificateRequest: + properties: + serialNumber: + description: Hex string of the serial number + type: string + type: object handler.OrganizationResponse: properties: createdAt: @@ -151,6 +103,31 @@ definitions: updatedAt: type: string type: object + handler.RevokeCertificateByPEMRequest: + properties: + certificate: + description: PEM encoded certificate + type: string + revocationReason: + type: integer + type: object + handler.RevokeCertificateBySerialRequest: + properties: + revocationReason: + type: integer + serialNumber: + description: Hex string of the serial number + type: string + type: object + handler.RevokedCertificateResponse: + properties: + reason: + type: integer + revocationTime: + type: string + serialNumber: + type: string + type: object handler.UpdateOrganizationRequest: properties: description: @@ -301,9 +278,7 @@ definitions: description: type: string genesisConfig: - items: - type: integer - type: array + type: string id: type: integer name: @@ -315,6 +290,42 @@ definitions: updatedAt: type: string type: object + http.BesuNodeDefaultsResponse: + properties: + defaults: + items: + $ref: '#/definitions/service.BesuNodeDefaults' + type: array + nodeCount: + type: integer + type: object + http.BlockListResponse: + properties: + blocks: + items: + $ref: '#/definitions/service.Block' + type: array + total: + type: integer + type: object + http.BlockTransactionsResponse: + properties: + 
block: + $ref: '#/definitions/service.Block' + transactions: + items: + $ref: '#/definitions/service.Transaction' + type: array + type: object + http.ChainInfoResponse: + properties: + currentBlockHash: + type: string + height: + type: integer + previousBlockHash: + type: string + type: object http.ChannelConfigResponse: properties: config: @@ -323,6 +334,88 @@ definitions: name: type: string type: object + http.ChannelResponse: + properties: + blockNum: + type: integer + createdAt: + type: string + name: + type: string + type: object + http.ConfigUpdateOperationRequest: + description: A single configuration update operation + properties: + payload: + description: |- + Payload contains the operation-specific data + The structure depends on the operation type: + - add_org: AddOrgPayload + - remove_org: RemoveOrgPayload + - update_org_msp: UpdateOrgMSPPayload + - set_anchor_peers: SetAnchorPeersPayload + - add_consenter: AddConsenterPayload + - remove_consenter: RemoveConsenterPayload + - update_consenter: UpdateConsenterPayload + - update_etcd_raft_options: UpdateEtcdRaftOptionsPayload + - update_batch_size: UpdateBatchSizePayload + - update_batch_timeout: UpdateBatchTimeoutPayload + @Description The payload for the configuration update operation + @Description Can be one of: + @Description - AddOrgPayload when type is "add_org" + @Description - RemoveOrgPayload when type is "remove_org" + @Description - UpdateOrgMSPPayload when type is "update_org_msp" + @Description - SetAnchorPeersPayload when type is "set_anchor_peers" + @Description - AddConsenterPayload when type is "add_consenter" + @Description - RemoveConsenterPayload when type is "remove_consenter" + @Description - UpdateConsenterPayload when type is "update_consenter" + @Description - UpdateEtcdRaftOptionsPayload when type is "update_etcd_raft_options" + @Description - UpdateBatchSizePayload when type is "update_batch_size" + @Description - UpdateBatchTimeoutPayload when type is "update_batch_timeout" + 
items: + type: integer + type: array + type: + description: |- + Type is the type of configuration update operation + enum: add_org,remove_org,update_org_msp,set_anchor_peers,add_consenter,remove_consenter,update_consenter,update_etcd_raft_options,update_batch_size,update_batch_timeout + enum: + - add_org + - remove_org + - update_org_msp + - set_anchor_peers + - add_consenter + - remove_consenter + - update_consenter + - update_etcd_raft_options + - update_batch_size + - update_batch_timeout + type: string + required: + - payload + - type + type: object + http.ConfigUpdateResponse: + properties: + channel_name: + type: string + created_at: + type: string + created_by: + type: string + id: + type: string + network_id: + type: integer + operations: + items: + $ref: '#/definitions/http.ConfigUpdateOperationRequest' + type: array + preview_json: + type: string + status: + type: string + type: object http.ConsenterConfig: properties: id: @@ -729,6 +822,15 @@ definitions: updatedAt: type: string type: object + http.NodeChannelsResponse: + properties: + channels: + items: + $ref: '#/definitions/http.ChannelResponse' + type: array + nodeId: + type: integer + type: object http.NodeEventResponse: properties: created_at: @@ -749,6 +851,8 @@ definitions: type: string endpoint: type: string + errorMessage: + type: string fabricOrderer: $ref: '#/definitions/service.FabricOrdererProperties' fabricPeer: @@ -911,6 +1015,11 @@ definitions: testedAt: type: string type: object + http.TransactionResponse: + properties: + transaction: + $ref: '#/definitions/service.Transaction' + type: object http.UpdateBackupScheduleRequest: properties: cronExpression: @@ -983,6 +1092,37 @@ definitions: required: - timeout type: object + http.UpdateBesuNodeRequest: + properties: + bootnodes: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + externalIp: + type: string + internalIp: + type: string + networkId: + type: integer + p2pHost: + type: string + 
p2pPort: + type: integer + rpcHost: + type: string + rpcPort: + type: integer + required: + - networkId + - p2pHost + - p2pPort + - rpcHost + - rpcPort + type: object http.UpdateConsenterPayload: properties: client_tls_cert: @@ -1032,6 +1172,80 @@ definitions: - snapshot_interval_size - tick_interval type: object + http.UpdateFabricNetworkRequest: + properties: + operations: + items: + $ref: '#/definitions/http.ConfigUpdateOperationRequest' + minItems: 1 + type: array + required: + - operations + type: object + http.UpdateFabricOrdererRequest: + properties: + adminAddress: + type: string + domainNames: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + externalEndpoint: + type: string + listenAddress: + type: string + operationsListenAddress: + type: string + version: + type: string + type: object + http.UpdateFabricPeerRequest: + properties: + addressOverrides: + items: + $ref: '#/definitions/types.AddressOverride' + type: array + chaincodeAddress: + type: string + domainNames: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + eventsAddress: + type: string + externalEndpoint: + type: string + listenAddress: + type: string + operationsListenAddress: + type: string + version: + type: string + type: object + http.UpdateNodeRequest: + properties: + besuNode: + $ref: '#/definitions/http.UpdateBesuNodeRequest' + blockchainPlatform: + $ref: '#/definitions/types.BlockchainPlatform' + fabricOrderer: + $ref: '#/definitions/http.UpdateFabricOrdererRequest' + fabricPeer: + allOf: + - $ref: '#/definitions/http.UpdateFabricPeerRequest' + description: Platform-specific configurations + name: + description: Common fields + type: string + type: object http.UpdateOrgMSPPayload: properties: msp_id: @@ -1051,6 +1265,18 @@ definitions: - root_certs - tls_root_certs type: object + http.UpdateOrganizationCRLRequest: + properties: + organizationId: + type: integer + required: + - 
organizationId + type: object + http.UpdateOrganizationCRLResponse: + properties: + transactionId: + type: string + type: object http.UpdateProviderRequest: properties: config: {} @@ -1096,7 +1322,7 @@ definitions: type: array extKeyUsage: items: - $ref: '#/definitions/crypto_x509.ExtKeyUsage' + $ref: '#/definitions/x509.ExtKeyUsage' type: array ipAddresses: items: @@ -1107,7 +1333,7 @@ definitions: isCA: type: boolean keyUsage: - $ref: '#/definitions/crypto_x509.KeyUsage' + $ref: '#/definitions/x509.KeyUsage' locality: items: type: string @@ -1313,6 +1539,8 @@ definitions: type: string sha256Fingerprint: type: string + signingKeyID: + type: integer status: type: string type: object @@ -1372,21 +1600,31 @@ definitions: type: object service.BesuNodeDefaults: properties: - externalIP: + environmentVariables: + additionalProperties: + type: string + type: object + externalIp: type: string - internalIP: + internalIp: type: string mode: $ref: '#/definitions/service.Mode' - networkId: - type: integer - p2pAddress: + p2pHost: type: string - rpcAddress: + p2pPort: + type: integer + rpcHost: type: string + rpcPort: + type: integer type: object service.BesuNodeProperties: properties: + bootNodes: + items: + type: string + type: array enodeUrl: type: string externalIp: @@ -1408,8 +1646,32 @@ definitions: type: string rpcPort: type: integer + version: + type: string type: object - service.FabricOrdererProperties: + service.Block: + properties: + data: + items: + type: integer + type: array + hash: + type: string + number: + type: integer + previous_hash: + type: string + timestamp: + type: string + tx_count: + type: integer + type: object + service.CreateSettingParams: + properties: + config: + $ref: '#/definitions/service.SettingConfig' + type: object + service.FabricOrdererProperties: properties: adminAddress: type: string @@ -1443,9 +1705,15 @@ definitions: type: string tlsKeyId: type: integer + version: + type: string type: object service.FabricPeerProperties: properties: 
+ addressOverrides: + items: + $ref: '#/definitions/types.AddressOverride' + type: array chaincodeAddress: type: string domainNames: @@ -1480,6 +1748,8 @@ definitions: type: string tlsKeyId: type: integer + version: + type: string type: object service.Mode: enum: @@ -1517,6 +1787,8 @@ definitions: FabricPeerDeploymentConfig, FabricOrdererDeploymentConfig, or BesuNodeDeploymentConfig' endpoint: type: string + errorMessage: + type: string id: type: integer mspId: @@ -1576,6 +1848,52 @@ definitions: $ref: '#/definitions/service.NodeDefaults' type: array type: object + service.Setting: + properties: + config: + $ref: '#/definitions/service.SettingConfig' + created_at: + type: string + id: + type: integer + updated_at: + type: string + type: object + service.SettingConfig: + properties: + besuTemplateCMD: + type: string + ordererTemplateCMD: + type: string + peerTemplateCMD: + type: string + type: object + service.Transaction: + properties: + block_number: + type: integer + creator: + type: string + payload: + items: + type: integer + type: array + timestamp: + type: string + tx_id: + type: string + type: + type: string + type: object + types.AddressOverride: + properties: + from: + type: string + tlsCACert: + type: string + to: + type: string + type: object types.BesuNodeConfig: properties: bootNodes: @@ -1631,6 +1949,11 @@ definitions: - PlatformBesu types.FabricOrdererConfig: properties: + addressOverrides: + description: '@Description Address overrides for the orderer' + items: + $ref: '#/definitions/types.AddressOverride' + type: array adminAddress: type: string domainNames: @@ -1673,6 +1996,11 @@ definitions: types.FabricPeerConfig: description: Configuration for creating a new Fabric peer node properties: + addressOverrides: + description: '@Description Address overrides for the peer' + items: + $ref: '#/definitions/types.AddressOverride' + type: array chaincodeAddress: description: '@Description Chaincode listen address' example: 0.0.0.0:7052 @@ -1715,6 +2043,11 
@@ definitions: description: '@Description Operations listen address' example: 0.0.0.0:9443 type: string + ordererAddressOverrides: + description: '@Description Orderer address overrides for the peer' + items: + $ref: '#/definitions/types.OrdererAddressOverride' + type: array organizationId: description: '@Description Organization ID that owns this peer' example: 1 @@ -1740,6 +2073,7 @@ definitions: - STOPPED - STOPPING - STARTING + - UPDATING - ERROR type: string x-enum-varnames: @@ -1748,6 +2082,7 @@ definitions: - NodeStatusStopped - NodeStatusStopping - NodeStatusStarting + - NodeStatusUpdating - NodeStatusError types.NodeType: enum: @@ -1759,6 +2094,22 @@ definitions: - NodeTypeFabricPeer - NodeTypeFabricOrderer - NodeTypeBesuFullnode + types.OrdererAddressOverride: + properties: + from: + description: '@Description Original orderer address' + type: string + tlsCACert: + description: '@Description TLS CA certificate in PEM format' + type: string + to: + description: '@Description New orderer address to use' + type: string + required: + - from + - tlsCACert + - to + type: object url.URL: properties: forceQuery: @@ -1797,12 +2148,66 @@ definitions: type: object url.Userinfo: type: object + x509.ExtKeyUsage: + enum: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + type: integer + x-enum-varnames: + - ExtKeyUsageAny + - ExtKeyUsageServerAuth + - ExtKeyUsageClientAuth + - ExtKeyUsageCodeSigning + - ExtKeyUsageEmailProtection + - ExtKeyUsageIPSECEndSystem + - ExtKeyUsageIPSECTunnel + - ExtKeyUsageIPSECUser + - ExtKeyUsageTimeStamping + - ExtKeyUsageOCSPSigning + - ExtKeyUsageMicrosoftServerGatedCrypto + - ExtKeyUsageNetscapeServerGatedCrypto + - ExtKeyUsageMicrosoftCommercialCodeSigning + - ExtKeyUsageMicrosoftKernelCodeSigning + x509.KeyUsage: + enum: + - 1 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + type: integer + x-enum-varnames: + - KeyUsageDigitalSignature + - KeyUsageContentCommitment + - 
KeyUsageKeyEncipherment + - KeyUsageDataEncipherment + - KeyUsageKeyAgreement + - KeyUsageCertSign + - KeyUsageCRLSign + - KeyUsageEncipherOnly + - KeyUsageDecipherOnly host: localhost:8100 info: contact: - email: support@chainlaunch.com + email: support@chainlaunch.dev name: API Support - url: http://www.chainlaunch.com/support + url: http://chainlaunch.dev/support description: ChainLaunch API provides services for managing blockchain networks and cryptographic keys license: @@ -1905,7 +2310,7 @@ paths: $ref: '#/definitions/response.Response' summary: List all backups tags: - - backups + - Backups post: consumes: - application/json @@ -1934,7 +2339,7 @@ paths: $ref: '#/definitions/response.Response' summary: Create a new backup tags: - - backups + - Backups /backups/{id}: delete: consumes: @@ -1965,7 +2370,7 @@ paths: $ref: '#/definitions/response.Response' summary: Delete a backup tags: - - backups + - Backups get: consumes: - application/json @@ -1997,7 +2402,7 @@ paths: $ref: '#/definitions/response.Response' summary: Get a backup by ID tags: - - backups + - Backups /backups/schedules: get: consumes: @@ -2018,7 +2423,7 @@ paths: $ref: '#/definitions/response.Response' summary: List all backup schedules tags: - - backup-schedules + - Backup Schedules post: consumes: - application/json @@ -2047,7 +2452,7 @@ paths: $ref: '#/definitions/response.Response' summary: Create a new backup schedule tags: - - backup-schedules + - Backup Schedules /backups/schedules/{id}: delete: consumes: @@ -2078,7 +2483,7 @@ paths: $ref: '#/definitions/response.Response' summary: Delete a backup schedule tags: - - backup-schedules + - Backup Schedules get: consumes: - application/json @@ -2110,7 +2515,7 @@ paths: $ref: '#/definitions/response.Response' summary: Get a backup schedule by ID tags: - - backup-schedules + - Backup Schedules put: consumes: - application/json @@ -2148,7 +2553,7 @@ paths: $ref: '#/definitions/response.Response' summary: Update a backup schedule tags: - - 
backup-schedules + - Backup Schedules /backups/schedules/{id}/disable: put: consumes: @@ -2181,7 +2586,7 @@ paths: $ref: '#/definitions/response.Response' summary: Disable a backup schedule tags: - - backup-schedules + - Backup Schedules /backups/schedules/{id}/enable: put: consumes: @@ -2214,7 +2619,7 @@ paths: $ref: '#/definitions/response.Response' summary: Enable a backup schedule tags: - - backup-schedules + - Backup Schedules /backups/targets: get: consumes: @@ -2235,7 +2640,7 @@ paths: $ref: '#/definitions/response.Response' summary: List all backup targets tags: - - backup-targets + - Backup Targets post: consumes: - application/json @@ -2264,7 +2669,7 @@ paths: $ref: '#/definitions/response.Response' summary: Create a new backup target tags: - - backup-targets + - Backup Targets /backups/targets/{id}: delete: consumes: @@ -2295,7 +2700,7 @@ paths: $ref: '#/definitions/response.Response' summary: Delete a backup target tags: - - backup-targets + - Backup Targets get: consumes: - application/json @@ -2327,7 +2732,7 @@ paths: $ref: '#/definitions/response.Response' summary: Get a backup target by ID tags: - - backup-targets + - Backup Targets put: consumes: - application/json @@ -2365,7 +2770,7 @@ paths: $ref: '#/definitions/response.Response' summary: Update a backup target tags: - - backup-targets + - Backup Targets /dummy: post: consumes: @@ -2423,7 +2828,7 @@ paths: $ref: '#/definitions/http.UpdateBatchTimeoutPayload' summary: Submit config update proposal tags: - - fabric-networks + - Fabric Networks /key-providers: get: consumes: @@ -2446,7 +2851,7 @@ paths: type: object summary: List all key providers tags: - - providers + - Providers post: consumes: - application/json @@ -2479,7 +2884,7 @@ paths: type: object summary: Create a new key provider tags: - - providers + - Providers /key-providers/{id}: delete: consumes: @@ -2522,7 +2927,7 @@ paths: type: object summary: Delete a provider tags: - - providers + - Providers get: consumes: - application/json 
@@ -2560,7 +2965,7 @@ paths: type: object summary: Get a specific provider tags: - - providers + - Providers /keys: get: consumes: @@ -2592,7 +2997,7 @@ paths: type: object summary: Get paginated keys tags: - - keys + - Keys post: consumes: - application/json @@ -2627,7 +3032,7 @@ paths: - ApiKeyAuth: [] summary: Create a new key tags: - - keys + - Keys /keys/{id}: delete: consumes: @@ -2664,7 +3069,7 @@ paths: type: object summary: Delete a key tags: - - keys + - Keys get: consumes: - application/json @@ -2702,7 +3107,7 @@ paths: type: object summary: Get a specific key by ID tags: - - keys + - Keys /keys/{keyID}/sign: post: consumes: @@ -2747,7 +3152,7 @@ paths: type: object summary: Sign a certificate tags: - - keys + - Keys /keys/all: get: consumes: @@ -2770,7 +3175,7 @@ paths: type: object summary: Get all keys tags: - - keys + - Keys /keys/filter: get: consumes: @@ -2816,7 +3221,7 @@ paths: type: object summary: Filter keys by algorithm and curve tags: - - keys + - Keys /networks/besu: get: description: Get a paginated list of Besu networks @@ -2846,7 +3251,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: List Besu networks tags: - - besu-networks + - Besu Networks post: consumes: - application/json @@ -2861,8 +3266,8 @@ paths: produces: - application/json responses: - "201": - description: Created + "200": + description: OK schema: $ref: '#/definitions/http.BesuNetworkResponse' "400": @@ -2875,7 +3280,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Create a new Besu network tags: - - besu-networks + - Besu Networks /networks/besu/{id}: delete: description: Delete an existing Besu network and all its resources @@ -2904,7 +3309,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Delete a Besu network tags: - - besu-networks + - Besu Networks get: description: Get details of a 
specific Besu network parameters: @@ -2934,7 +3339,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Get a Besu network by ID tags: - - besu-networks + - Besu Networks /networks/besu/import: post: consumes: @@ -2964,7 +3369,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Import a Besu network tags: - - besu-networks + - Besu Networks /networks/fabric: get: description: Get a paginated list of Fabric networks @@ -2994,7 +3399,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: List Fabric networks tags: - - fabric-networks + - Fabric Networks post: consumes: - application/json @@ -3023,7 +3428,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Create a new Fabric network tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}: delete: description: Delete an existing Fabric network and all its resources @@ -3052,7 +3457,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Delete a Fabric network tags: - - fabric-networks + - Fabric Networks get: description: Get details of a specific Fabric network parameters: @@ -3082,7 +3487,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Get a Fabric network by ID tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}/anchor-peers: post: consumes: @@ -3117,7 +3522,86 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Set anchor peers for an organization tags: - - fabric-networks + - Fabric Networks + /networks/fabric/{id}/blocks: + get: + description: Get a paginated list of blocks from a Fabric network + parameters: + - description: Network ID + in: path + name: id + required: true + type: integer 
+ - description: 'Number of blocks to return (default: 10)' + in: query + name: limit + type: integer + - description: 'Number of blocks to skip (default: 0)' + in: query + name: offset + type: integer + - description: 'Get blocks in reverse order (default: false)' + in: query + name: reverse + type: boolean + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.BlockListResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "404": + description: Not Found + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + summary: Get list of blocks from Fabric network + tags: + - Fabric Networks + /networks/fabric/{id}/blocks/{blockNum}: + get: + description: Get all transactions from a specific block in a Fabric network + parameters: + - description: Network ID + in: path + name: id + required: true + type: integer + - description: Block Number + in: path + name: blockNum + required: true + type: integer + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.BlockTransactionsResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "404": + description: Not Found + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + summary: Get transactions from a specific block + tags: + - Fabric Networks /networks/fabric/{id}/channel-config: get: description: Retrieve the channel configuration for a Fabric network 
@@ -3144,7 +3628,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Get Fabric network channel configuration tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}/current-channel-config: get: description: Retrieve the current channel configuration for a Fabric network @@ -3171,7 +3655,41 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Get Fabric network current channel configuration tags: - - fabric-networks + - Fabric Networks + /networks/fabric/{id}/info: + get: + consumes: + - application/json + description: Retrieve detailed information about the Fabric blockchain including + height and block hashes + parameters: + - description: Network ID + in: path + name: id + required: true + type: integer + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.ChainInfoResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "404": + description: Not Found + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + summary: Get Fabric chain information + tags: + - Fabric Networks /networks/fabric/{id}/nodes: get: description: Get all nodes associated with a network @@ -3202,7 +3720,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Get network nodes tags: - - fabric-networks + - Fabric Networks post: consumes: - application/json @@ -3236,7 +3754,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Add node to network tags: - - fabric-networks + - Fabric Networks 
/networks/fabric/{id}/orderers/{ordererId}: delete: consumes: @@ -3270,7 +3788,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Remove orderer from Fabric network tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}/orderers/{ordererId}/join: post: consumes: @@ -3304,7 +3822,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Join orderer to Fabric network tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}/orderers/{ordererId}/unjoin: post: consumes: @@ -3338,7 +3856,47 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Unjoin orderer from Fabric network tags: - - fabric-networks + - Fabric Networks + /networks/fabric/{id}/organization-crl: + post: + consumes: + - application/json + description: Update the Certificate Revocation List (CRL) for an organization + in the network + parameters: + - description: Network ID + in: path + name: id + required: true + type: integer + - description: Organization CRL update request + in: body + name: request + required: true + schema: + $ref: '#/definitions/http.UpdateOrganizationCRLRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.UpdateOrganizationCRLResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "404": + description: Not Found + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + summary: Update organization CRL + tags: + - Fabric Networks /networks/fabric/{id}/organizations/{orgId}/config: get: description: Get the network configuration as YAML @@ -3374,7 
+3932,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Get network configuration tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}/peers/{peerId}: delete: consumes: @@ -3408,7 +3966,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Remove peer from Fabric network tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}/peers/{peerId}/join: post: consumes: @@ -3442,7 +4000,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Join peer to Fabric network tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}/peers/{peerId}/unjoin: post: consumes: @@ -3476,7 +4034,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Unjoin peer from Fabric network tags: - - fabric-networks + - Fabric Networks /networks/fabric/{id}/reload-block: post: consumes: @@ -3517,14 +4075,20 @@ paths: type: object summary: Reload network config block tags: - - fabric-networks - /networks/fabric/by-name/{name}: + - Fabric Networks + /networks/fabric/{id}/transactions/{txId}: get: - description: Get details of a specific Fabric network using its slug + description: Get detailed information about a specific transaction in a Fabric + network parameters: - - description: Network Slug + - description: Network ID in: path - name: slug + name: id + required: true + type: integer + - description: Transaction ID + in: path + name: txId required: true type: string produces: @@ -3533,7 +4097,7 @@ paths: "200": description: OK schema: - $ref: '#/definitions/http.NetworkResponse' + $ref: '#/definitions/http.TransactionResponse' "400": description: Bad Request schema: @@ -3546,16 +4110,94 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' - summary: 
Get a Fabric network by slug + summary: Get transaction details by transaction ID tags: - - fabric-networks - /networks/fabric/import: + - Fabric Networks + /networks/fabric/{id}/update-config: post: consumes: - application/json - description: Import an existing Fabric network using its genesis block + description: |- + Prepare a config update proposal for a Fabric network using the provided operations. + The following operation types are supported: + - add_org: Add a new organization to the channel + - remove_org: Remove an organization from the channel + - update_org_msp: Update an organization's MSP configuration + - set_anchor_peers: Set anchor peers for an organization + - add_consenter: Add a new consenter to the orderer + - remove_consenter: Remove a consenter from the orderer + - update_consenter: Update a consenter in the orderer + - update_etcd_raft_options: Update etcd raft options for the orderer + - update_batch_size: Update batch size for the orderer + - update_batch_timeout: Update batch timeout for the orderer parameters: - - description: Import network request + - description: Network ID + in: path + name: id + required: true + type: integer + - description: Config update operations + in: body + name: request + required: true + schema: + $ref: '#/definitions/http.UpdateFabricNetworkRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.ConfigUpdateResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + summary: Prepare a config update for a Fabric network + tags: + - Fabric Networks + /networks/fabric/by-name/{name}: + get: + description: Get details of a specific Fabric network using its slug + parameters: + - description: Network Slug + in: path + 
name: slug + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.NetworkResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "404": + description: Not Found + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' + summary: Get a Fabric network by slug + tags: + - Fabric Networks + /networks/fabric/import: + post: + consumes: + - application/json + description: Import an existing Fabric network using its genesis block + parameters: + - description: Import network request in: body name: request required: true @@ -3578,7 +4220,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Import a Fabric network tags: - - fabric-networks + - Fabric Networks /networks/fabric/import-with-org: post: consumes: @@ -3608,7 +4250,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_networks_http.ErrorResponse' summary: Import a Fabric network with organization tags: - - fabric-networks + - Fabric Networks /nodes: get: consumes: @@ -3646,7 +4288,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: List all nodes tags: - - nodes + - Nodes post: consumes: - application/json @@ -3675,7 +4317,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Create a new node tags: - - nodes + - Nodes /nodes/{id}: delete: consumes: @@ -3706,7 +4348,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Delete a node tags: - - nodes + - Nodes get: consumes: - application/json @@ -3738,7 +4380,111 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Get a node tags: - - nodes + - Nodes + put: + consumes: + - 
application/json + description: Updates an existing node's configuration based on its type + parameters: + - description: Node ID + in: path + name: id + required: true + type: integer + - description: Update node request + in: body + name: request + required: true + schema: + $ref: '#/definitions/http.UpdateNodeRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.NodeResponse' + "400": + description: Validation error + schema: + $ref: '#/definitions/response.ErrorResponse' + "404": + description: Node not found + schema: + $ref: '#/definitions/response.ErrorResponse' + "500": + description: Internal server error + schema: + $ref: '#/definitions/response.ErrorResponse' + summary: Update a node + tags: + - Nodes + /nodes/{id}/certificates/renew: + post: + consumes: + - application/json + description: Renews the TLS and signing certificates for a Fabric node + parameters: + - description: Node ID + in: path + name: id + required: true + type: integer + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.NodeResponse' + "400": + description: Validation error + schema: + $ref: '#/definitions/response.ErrorResponse' + "404": + description: Node not found + schema: + $ref: '#/definitions/response.ErrorResponse' + "500": + description: Internal server error + schema: + $ref: '#/definitions/response.ErrorResponse' + summary: Renew node certificates + tags: + - Nodes + /nodes/{id}/channels: + get: + consumes: + - application/json + description: Retrieves all channels for a specific Fabric node + parameters: + - description: Node ID + in: path + name: id + required: true + type: integer + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/http.NodeChannelsResponse' + "400": + description: Validation error + schema: + $ref: '#/definitions/response.ErrorResponse' + "404": + description: Node not found + 
schema: + $ref: '#/definitions/response.ErrorResponse' + "500": + description: Internal server error + schema: + $ref: '#/definitions/response.ErrorResponse' + summary: Get channels for a Fabric node + tags: + - Nodes /nodes/{id}/events: get: consumes: @@ -3781,7 +4527,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Get node events tags: - - nodes + - Nodes /nodes/{id}/logs: get: consumes: @@ -3824,7 +4570,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Tail node logs tags: - - nodes + - Nodes /nodes/{id}/restart: post: consumes: @@ -3857,7 +4603,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Restart a node tags: - - nodes + - Nodes /nodes/{id}/start: post: consumes: @@ -3890,7 +4636,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Start a node tags: - - nodes + - Nodes /nodes/{id}/stop: post: consumes: @@ -3923,24 +4669,33 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Stop a node tags: - - nodes + - Nodes /nodes/defaults/besu-node: get: description: Get default configuration values for a Besu node + parameters: + - default: 1 + description: Number of Besu nodes + in: query + minimum: 0 + name: besuNodes + type: integer produces: - application/json responses: "200": description: OK schema: - $ref: '#/definitions/service.BesuNodeDefaults' + items: + $ref: '#/definitions/http.BesuNodeDefaultsResponse' + type: array "500": description: Internal server error schema: $ref: '#/definitions/response.ErrorResponse' summary: Get default values for Besu node tags: - - nodes + - Nodes /nodes/defaults/fabric: get: description: Get default configuration values for multiple Fabric nodes @@ -3982,7 +4737,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Get default values for multiple Fabric nodes tags: - - nodes + - Nodes /nodes/defaults/fabric-orderer: get: description: Get default configuration values for a Fabric orderer node @@ -3999,7 +4754,7 @@ paths: $ref: 
'#/definitions/response.ErrorResponse' summary: Get default values for Fabric orderer node tags: - - nodes + - Nodes /nodes/defaults/fabric-peer: get: description: Get default configuration values for a Fabric peer node @@ -4016,7 +4771,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: Get default values for Fabric peer node tags: - - nodes + - Nodes /nodes/platform/{platform}: get: consumes: @@ -4058,7 +4813,7 @@ paths: $ref: '#/definitions/response.ErrorResponse' summary: List nodes by platform tags: - - nodes + - Nodes /notifications/providers: get: consumes: @@ -4079,7 +4834,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_notifications_http.ErrorResponse' summary: List notification providers tags: - - notifications + - Notifications post: consumes: - application/json @@ -4108,7 +4863,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_notifications_http.ErrorResponse' summary: Create a notification provider tags: - - notifications + - Notifications /notifications/providers/{id}: delete: consumes: @@ -4139,7 +4894,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_notifications_http.ErrorResponse' summary: Delete a notification provider tags: - - notifications + - Notifications get: consumes: - application/json @@ -4171,7 +4926,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_notifications_http.ErrorResponse' summary: Get a notification provider tags: - - notifications + - Notifications put: consumes: - application/json @@ -4209,7 +4964,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_notifications_http.ErrorResponse' summary: Update a notification provider tags: - - notifications + - Notifications /notifications/providers/{id}/test: post: consumes: @@ -4244,7 +4999,7 @@ paths: $ref: '#/definitions/github_com_chainlaunch_chainlaunch_pkg_notifications_http.ErrorResponse' summary: Test a notification provider tags: - - notifications + - 
Notifications /organizations: get: consumes: @@ -4267,7 +5022,7 @@ paths: type: object summary: List all Fabric organizations tags: - - organizations + - Organizations post: consumes: - application/json @@ -4300,7 +5055,7 @@ paths: type: object summary: Create a new Fabric organization tags: - - organizations + - Organizations /organizations/{id}: delete: consumes: @@ -4331,7 +5086,7 @@ paths: type: object summary: Delete a Fabric organization tags: - - organizations + - Organizations get: consumes: - application/json @@ -4363,7 +5118,7 @@ paths: type: object summary: Get a Fabric organization tags: - - organizations + - Organizations put: consumes: - application/json @@ -4407,7 +5162,205 @@ paths: type: object summary: Update a Fabric organization tags: - - organizations + - Organizations + /organizations/{id}/crl: + get: + consumes: + - application/json + description: Get the current Certificate Revocation List for the organization + parameters: + - description: Organization ID + in: path + name: id + required: true + type: integer + produces: + - application/x-pem-file + responses: + "200": + description: PEM encoded CRL + schema: + type: string + "400": + description: Bad Request + schema: + additionalProperties: + type: string + type: object + "500": + description: Internal Server Error + schema: + additionalProperties: + type: string + type: object + summary: Get organization's CRL + tags: + - Organizations + /organizations/{id}/crl/revoke/pem: + post: + consumes: + - application/json + description: Add a certificate to the organization's CRL using its PEM encoded + data + parameters: + - description: Organization ID + in: path + name: id + required: true + type: integer + - description: Certificate revocation request + in: body + name: request + required: true + schema: + $ref: '#/definitions/handler.RevokeCertificateByPEMRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + additionalProperties: + type: string + type: 
object + "400": + description: Bad Request + schema: + additionalProperties: + type: string + type: object + "500": + description: Internal Server Error + schema: + additionalProperties: + type: string + type: object + summary: Revoke a certificate using PEM data + tags: + - Organizations + /organizations/{id}/crl/revoke/serial: + delete: + consumes: + - application/json + description: Remove a certificate from the organization's CRL using its serial + number + parameters: + - description: Organization ID + in: path + name: id + required: true + type: integer + - description: Certificate deletion request + in: body + name: request + required: true + schema: + $ref: '#/definitions/handler.DeleteRevokedCertificateRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + additionalProperties: + type: string + type: object + "400": + description: Bad Request + schema: + additionalProperties: + type: string + type: object + "404": + description: Not Found + schema: + additionalProperties: + type: string + type: object + "500": + description: Internal Server Error + schema: + additionalProperties: + type: string + type: object + summary: Delete a revoked certificate using its serial number + tags: + - Organizations + post: + consumes: + - application/json + description: Add a certificate to the organization's CRL using its serial number + parameters: + - description: Organization ID + in: path + name: id + required: true + type: integer + - description: Certificate revocation request + in: body + name: request + required: true + schema: + $ref: '#/definitions/handler.RevokeCertificateBySerialRequest' + produces: + - application/json + responses: + "200": + description: OK + schema: + additionalProperties: + type: string + type: object + "400": + description: Bad Request + schema: + additionalProperties: + type: string + type: object + "500": + description: Internal Server Error + schema: + additionalProperties: + type: string + type: 
object + summary: Revoke a certificate using its serial number + tags: + - Organizations + /organizations/{id}/revoked-certificates: + get: + consumes: + - application/json + description: Get all revoked certificates for the organization + parameters: + - description: Organization ID + in: path + name: id + required: true + type: integer + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/handler.RevokedCertificateResponse' + type: array + "400": + description: Bad Request + schema: + additionalProperties: + type: string + type: object + "500": + description: Internal Server Error + schema: + additionalProperties: + type: string + type: object + summary: Get organization's revoked certificates + tags: + - Organizations /organizations/by-mspid/{mspid}: get: consumes: @@ -4440,7 +5393,41 @@ paths: type: object summary: Get a Fabric organization by MSP ID tags: - - organizations + - Organizations + /settings: + get: + description: Get the default setting's details + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/service.Setting' + summary: Get the default setting + tags: + - Settings + post: + consumes: + - application/json + description: Create or update the default setting with the provided configuration + parameters: + - description: Setting configuration + in: body + name: setting + required: true + schema: + $ref: '#/definitions/service.CreateSettingParams' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/service.Setting' + summary: Create or update the default setting + tags: + - Settings schemes: - http - https @@ -4457,7 +5444,5 @@ tags: name: Keys - description: Key provider management operations name: Providers -- description: Blockchain network management operations - name: Networks - description: Network node management operations name: Nodes diff --git a/go.mod b/go.mod index 
4ad4c47..384b4f6 100644 --- a/go.mod +++ b/go.mod @@ -12,93 +12,61 @@ require ( github.com/golang-migrate/migrate/v4 v4.18.1 github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 - github.com/hyperledger/fabric v2.1.1+incompatible - github.com/hyperledger/fabric-config v0.1.0 - github.com/hyperledger/fabric-protos-go v0.3.0 + github.com/hyperledger/fabric-config v0.3.0 github.com/mattn/go-sqlite3 v1.14.24 - github.com/nuts-foundation/go-did v0.15.0 github.com/pkg/errors v0.9.1 github.com/robfig/cron/v3 v3.0.1 github.com/spf13/cobra v1.8.1 github.com/swaggo/http-swagger v1.3.4 github.com/swaggo/swag v1.16.4 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.33.0 - golang.org/x/text v0.22.0 + golang.org/x/crypto v0.37.0 + golang.org/x/net v0.37.0 // indirect + golang.org/x/text v0.24.0 gopkg.in/mail.v2 v2.3.1 ) -require github.com/hyperledger/fabric-sdk-go v1.0.0 +require ( + github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible + github.com/hyperledger/fabric-gateway v1.5.0 + github.com/hyperledger/fabric-protos-go-apiv2 v0.3.3 + github.com/stretchr/testify v1.10.0 + google.golang.org/grpc v1.71.0 + google.golang.org/protobuf v1.36.5 + gopkg.in/yaml.v3 v3.0.1 +) require ( dario.cat/mergo v1.0.1 // indirect - github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudflare/cfssl v1.4.1 // indirect github.com/containerd/log v0.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect 
github.com/gabriel-vasile/mimetype v1.4.8 // indirect - github.com/go-kit/kit v0.10.0 // indirect - github.com/go-logfmt/logfmt v0.5.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/mock v1.4.4 // indirect - github.com/google/certificate-transparency-go v1.0.21 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/holiman/uint256 v1.3.2 // indirect github.com/huandu/xstrings v1.5.0 // indirect - github.com/hyperledger/fabric-amcl v0.0.0-20230602173724-9e02669dceb2 // indirect - github.com/hyperledger/fabric-lib-go v1.0.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect - github.com/lestrrat-go/blackmagic v1.0.2 // indirect - github.com/lestrrat-go/httpcc v1.0.1 // indirect - github.com/lestrrat-go/httprc v1.0.6 // indirect - github.com/lestrrat-go/iter v1.0.2 // indirect - github.com/lestrrat-go/jwx/v2 v2.1.1 // indirect - github.com/lestrrat-go/option v1.0.1 // indirect - github.com/magiconair/properties v1.8.5 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/mr-tron/base58 v1.1.0 // indirect - github.com/multiformats/go-base32 v0.0.3 // indirect - github.com/multiformats/go-base36 v0.1.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect - 
github.com/pelletier/go-toml v1.9.4 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.12.0 // indirect - github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/segmentio/asm v1.2.0 // indirect - github.com/shengdoushi/base58 v1.0.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect - github.com/spf13/afero v1.6.0 // indirect github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/viper v1.1.1 // indirect - github.com/stretchr/testify v1.10.0 // indirect - github.com/sykesm/zap-logfmt v0.0.4 // indirect - github.com/weppos/publicsuffix-go v0.5.0 // indirect - github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e // indirect - github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect go.opentelemetry.io/otel v1.35.0 // indirect @@ -107,12 +75,9 @@ require ( go.opentelemetry.io/otel/sdk v1.35.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/sys v0.32.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/grpc v1.71.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect ) @@ -127,6 +92,7 @@ require ( github.com/go-playground/validator/v10 v10.24.0 github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + 
github.com/hyperledger/fabric-admin-sdk v0.1.0 github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/mailru/easyjson v0.7.6 // indirect @@ -134,9 +100,8 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/net v0.35.0 // indirect golang.org/x/tools v0.29.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) -replace github.com/hyperledger/fabric-sdk-go => github.com/kfsoftware/fabric-sdk-go v0.0.0-20250318193343-db7cb6f42306 +replace github.com/hyperledger/fabric-admin-sdk => github.com/kfsoftware/fabric-admin-sdk v0.0.0-20250405175109-fd063100bb3f diff --git a/go.sum b/go.sum index 9ca548a..ec501e3 100644 --- a/go.sum +++ b/go.sum @@ -1,46 +1,9 @@ -bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go 
v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= -github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= -github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= @@ -53,68 +16,18 @@ github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/VividCortex/gohistogram v1.0.0/go.mod 
h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= -github.com/cloudflare/cfssl v1.4.1 h1:vScfU2DrIUI9VPHBVeeAQ0q5A+9yshO1Gz+3QoUQiKw= -github.com/cloudflare/cfssl v1.4.1/go.mod h1:KManx/OJPb5QY+y0+o/898AMcM128sF0bURvoVUSjTo= -github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= -github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -122,7 +35,6 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5il github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= @@ -131,52 +43,25 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0 
h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/go-ethereum v1.15.1 h1:ZR5hh6NXem4hNnhMIrdPFMTGHo6USTwWn47hbs6gRj4= github.com/ethereum/go-ethereum v1.15.1/go.mod h1:wGQINJKEVUunCeoaA9C9qKMQ9GEOsEIunzzqTUO2F6Y= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= 
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= -github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-chi/chi/v5 v5.2.0 h1:Aj1EtB0qR2Rdo2dG4O94RIU35w2lvQSj6BRA4+qwFL0= github.com/go-chi/chi/v5 v5.2.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= github.com/go-chi/render v1.0.3 h1:AsXqd2a1/INaIfUSKq3G5uA8weYx20FOsM7uSoCyyt4= github.com/go-chi/render v1.0.3/go.mod h1:/gr3hVkmYR0YlEy3LxCuVRFzEu9Ruok+gFqbIofjao0= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -201,179 +86,55 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.24.0 h1:KHQckvo8G6hlWnrPX4NJJ+aBfWNAE/HH+qdL2cBpCmg= github.com/go-playground/validator/v10 v10.24.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus= -github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= github.com/golang-migrate/migrate/v4 v4.18.1/go.mod 
h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp 
v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= 
-github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl 
v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/hyperledger/fabric v2.1.1+incompatible h1:cYYRv3vVg4kA6DmrixLxwn1nwBEUuYda8DsMwlaMKbY= -github.com/hyperledger/fabric v2.1.1+incompatible/go.mod h1:tGFAOCT696D3rG0Vofd2dyWYLySHlh0aQjf7Q1HAju0= -github.com/hyperledger/fabric-amcl v0.0.0-20230602173724-9e02669dceb2 h1:B1Nt8hKb//KvgGRprk0h1t4lCnwhE9/ryb1WqfZbV+M= -github.com/hyperledger/fabric-amcl v0.0.0-20230602173724-9e02669dceb2/go.mod h1:X+DIyUsaTmalOpmpQfIvFZjKHQedrURQ5t4YqquX7lE= -github.com/hyperledger/fabric-config v0.0.5/go.mod h1:YpITBI/+ZayA3XWY5lF302K7PAsFYjEEPM/zr3hegA8= -github.com/hyperledger/fabric-config v0.1.0 h1:TsR3y5xEoUmXWfp8tcDycjJhVvXEHiV5kfZIxuIte08= -github.com/hyperledger/fabric-config v0.1.0/go.mod h1:aeDZ0moG/qKvwLjddcqYr8+58/oNaJy3HE0tI01546c= -github.com/hyperledger/fabric-lib-go v1.0.0 h1:UL1w7c9LvHZUSkIvHTDGklxFv2kTeva1QI2emOVc324= -github.com/hyperledger/fabric-lib-go v1.0.0/go.mod h1:H362nMlunurmHwkYqR5uHL2UDWbQdbfz74n8kbCFsqc= -github.com/hyperledger/fabric-protos-go 
v0.0.0-20200424173316-dd554ba3746e/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= -github.com/hyperledger/fabric-protos-go v0.0.0-20211118165945-23d738fc3553/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= -github.com/hyperledger/fabric-protos-go v0.3.0 h1:MXxy44WTMENOh5TI8+PCK2x6pMj47Go2vFRKDHB2PZs= -github.com/hyperledger/fabric-protos-go v0.3.0/go.mod h1:WWnyWP40P2roPmmvxsUXSvVI/CF6vwY1K1UFidnKBys= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/hyperledger/fabric-config v0.3.0 h1:FS5/dc9GAniljP6RYxQRG92AaiBVoN2vTvtOvnWqeQs= +github.com/hyperledger/fabric-config v0.3.0/go.mod h1:kSevTn78K83Suc++JsEo7Nt1tYIPqDajW+ORz3OhWlg= +github.com/hyperledger/fabric-gateway v1.5.0 h1:JChlqtJNm2479Q8YWJ6k8wwzOiu2IRrV3K8ErsQmdTU= +github.com/hyperledger/fabric-gateway v1.5.0/go.mod h1:v13OkXAp7pKi4kh6P6epn27SyivRbljr8Gkfy8JlbtM= +github.com/hyperledger/fabric-protos-go-apiv2 v0.3.3 h1:Xpd6fzG/KjAOHJsq7EQXY2l+qi/y8muxBaY7R6QWABk= +github.com/hyperledger/fabric-protos-go-apiv2 v0.3.3/go.mod h1:2pq0ui6ZWA0cC8J+eCErgnMDCS1kPOEYVY+06ZAK0qE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= -github.com/jmoiron/sqlx v0.0.0-20180124204410-05cef0741ade/go.mod 
h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kfsoftware/fabric-sdk-go v0.0.0-20240114221414-98466038585d h1:HcMV8Lve3QkZUIWYHP+rVIR4xtTdDPooj7Id0IdBj0o= -github.com/kfsoftware/fabric-sdk-go v0.0.0-20240114221414-98466038585d/go.mod h1:JRplpKBeAvXjsBhOCCM/KvMRUbdDyhsAh80qbXzKc10= -github.com/kfsoftware/fabric-sdk-go v0.0.0-20250318193343-db7cb6f42306 h1:1HeRlKS4qdrC26HAe8ZqRiuBUPiGFDY7taHuehyraRE= -github.com/kfsoftware/fabric-sdk-go v0.0.0-20250318193343-db7cb6f42306/go.mod h1:JRplpKBeAvXjsBhOCCM/KvMRUbdDyhsAh80qbXzKc10= 
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kfsoftware/fabric-admin-sdk v0.0.0-20250405175109-fd063100bb3f h1:w8Fhi718VAjC5Snwjx+1swaSyXCYEMzs0+kQICsjHc0= +github.com/kfsoftware/fabric-admin-sdk v0.0.0-20250405175109-fd063100bb3f/go.mod h1:lg28l2L1QhpsdKTfGLMmz0Ug+ZTbJOX6nhM7YhzcVgE= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= -github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -381,221 +142,70 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod 
h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= -github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= -github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= -github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= -github.com/lestrrat-go/httprc v1.0.6 h1:qgmgIRhpvBqexMJjA/PmwSvhNk679oqD1RbovdCGW8k= -github.com/lestrrat-go/httprc v1.0.6/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= -github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= -github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= -github.com/lestrrat-go/jwx/v2 v2.1.1 h1:Y2ltVl8J6izLYFs54BVcpXLv5msSW4o8eXwnzZLI32E= -github.com/lestrrat-go/jwx/v2 v2.1.1/go.mod h1:4LvZg7oxu6Q5VJwn7Mk/UwooNRnTHUpXBj2C4j3HNx0= -github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= -github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= -github.com/lib/pq v0.0.0-20180201184707-88edab080323/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure 
v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod 
h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= -github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= -github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= -github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= -github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= -github.com/nuts-foundation/go-did v0.15.0 h1:aNl6KC8jiyRJGl9PPKFBboLLC0wUm5h+tjE1UBDQEPw= -github.com/nuts-foundation/go-did v0.15.0/go.mod h1:swjCJvcRxc+i1nyieIERWEb3vFb4N7iYC+qen2OIbNg= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 
h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= -github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y= -github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= 
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= -github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= -github.com/shengdoushi/base58 v1.0.0 h1:tGe4o6TmdXFJWoI31VoSWvuaKxf0Px3gqa3sUWhAxBs= -github.com/shengdoushi/base58 v1.0.0/go.mod h1:m5uIILfzcKMw6238iWAhP4l3s5+uXyF3+bJKUNhAL9I= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= 
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag 
v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.1.1 h1:/8JBRFO4eoHu1TmpsLgNBq1CQgRUg4GolYlEFieqJgo= -github.com/spf13/viper v1.1.1/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe h1:K8pHPVoTgxFJt1lXuIzzOX7zZhZFldJQK/CgKx9BFIc= @@ -604,37 +214,8 @@ github.com/swaggo/http-swagger v1.3.4 h1:q7t/XLx0n15H1Q9/tk3Y9L4n210XzJF5WtnDX64 github.com/swaggo/http-swagger v1.3.4/go.mod h1:9dAh0unqMBAlbp1uE2Uc2mQTxNMU/ha4UbucIg1MFkQ= github.com/swaggo/swag v1.16.4 
h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= -github.com/sykesm/zap-logfmt v0.0.4 h1:U2WzRvmIWG1wDLCFY3sz8UeEmsdHQjHFNlIdmroVFaI= -github.com/sykesm/zap-logfmt v0.0.4/go.mod h1:AuBd9xQjAe3URrWT1BBDk2v2onAZHkZkWRMiYZXiZWA= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= -github.com/weppos/publicsuffix-go v0.5.0 h1:rutRtjBJViU/YjcI5d80t4JAVvDltS6bciJg2K1HrLU= -github.com/weppos/publicsuffix-go v0.5.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= -github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= -github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e h1:mvOa4+/DXStR4ZXOks/UsjeFdn5O5JpLUtzqk9U8xXw= 
-github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8= -github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb h1:vxqkjztXSaPVDc8FQCdHTaejm2x747f6yPbnu1h2xkg= -github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb/go.mod h1:29UiAJNsiVdvTBFCJW8e3q6dcDbOoPkhMgttOSCIMMY= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= @@ -655,233 +236,79 @@ go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= 
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= 
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= @@ -889,140 +316,44 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= 
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a 
h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/internal/protoutil/protoutil.go b/internal/protoutil/protoutil.go new file mode 100644 index 0000000..a271cc1 --- /dev/null +++ b/internal/protoutil/protoutil.go @@ -0,0 +1,114 @@ +package protoutil + +import ( + "bytes" + "errors" + "fmt" + + "github.com/hyperledger/fabric-gateway/pkg/identity" + + 
"github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/peer" +) + +// CreateSignedTx assembles an Envelope message from proposal, endorsements, +// and a signer. This function should be called by a client when it has +// collected enough endorsements for a proposal to create a transaction and +// submit it to peers for ordering +func CreateSignedTx( + proposal *peer.Proposal, + signer identity.Sign, + resps ...*peer.ProposalResponse, +) (*common.Envelope, error) { + if err := ensureValidResponses(resps); err != nil { + return nil, err + } + + // the original header + hdr, err := UnmarshalHeader(proposal.Header) + if err != nil { + return nil, err + } + + // the original payload + pPayl, err := UnmarshalChaincodeProposalPayload(proposal.Payload) + if err != nil { + return nil, err + } + + endorsements := fillEndorsements(resps) + + // create ChaincodeEndorsedAction + cea := &peer.ChaincodeEndorsedAction{ProposalResponsePayload: resps[0].Payload, Endorsements: endorsements} + + // obtain the bytes of the proposal payload that will go to the transaction + propPayloadBytes, err := GetBytesProposalPayloadForTx(pPayl) + if err != nil { + return nil, err + } + + // serialize the chaincode action payload + cap := &peer.ChaincodeActionPayload{ChaincodeProposalPayload: propPayloadBytes, Action: cea} + capBytes, err := GetBytesChaincodeActionPayload(cap) + if err != nil { + return nil, err + } + + // create a transaction + taa := &peer.TransactionAction{Header: hdr.SignatureHeader, Payload: capBytes} + taas := make([]*peer.TransactionAction, 1) + taas[0] = taa + tx := &peer.Transaction{Actions: taas} + + // serialize the tx + txBytes, err := GetBytesTransaction(tx) + if err != nil { + return nil, err + } + + // create the payload + payl := &common.Payload{Header: hdr, Data: txBytes} + paylBytes, err := GetBytesPayload(payl) + if err != nil { + return nil, err + } + + // sign the payload + sig, err := signer(paylBytes) + if err 
!= nil { + return nil, err + } + + // here's the envelope + return &common.Envelope{Payload: paylBytes, Signature: sig}, nil +} + +// ensureValidResponses checks that all actions are bitwise equal and that they are successful. +func ensureValidResponses(responses []*peer.ProposalResponse) error { + if len(responses) == 0 { + return errors.New("at least one proposal response is required") + } + + var firstResponse []byte + for n, r := range responses { + if r.Response.Status < 200 || r.Response.Status >= 400 { + return fmt.Errorf("proposal response was not successful, error code %d, msg %s", r.Response.Status, r.Response.Message) + } + + if n == 0 { + firstResponse = r.Payload + } else if !bytes.Equal(firstResponse, r.Payload) { + return errors.New("ProposalResponsePayloads do not match") + } + } + + return nil +} + +func fillEndorsements(responses []*peer.ProposalResponse) []*peer.Endorsement { + endorsements := make([]*peer.Endorsement, len(responses)) + for n, r := range responses { + endorsements[n] = r.Endorsement + } + return endorsements +} diff --git a/internal/protoutil/txtutils.go b/internal/protoutil/txtutils.go new file mode 100644 index 0000000..cb8a928 --- /dev/null +++ b/internal/protoutil/txtutils.go @@ -0,0 +1,382 @@ +package protoutil + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + + "github.com/hyperledger/fabric-admin-sdk/pkg/identity" + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// ComputeTxID computes TxID as the Hash computed +// over the concatenation of nonce and creator. 
+func ComputeTxID(nonce, creator []byte) string { + // TODO: Get the Hash function to be used from + // channel configuration + hasher := sha256.New() + hasher.Write(nonce) + hasher.Write(creator) + return hex.EncodeToString(hasher.Sum(nil)) +} + +func GetRandomNonce() ([]byte, error) { + return getRandomNonce() +} + +func getRandomNonce() ([]byte, error) { + key := make([]byte, 24) + + _, err := rand.Read(key) + if err != nil { + return nil, fmt.Errorf("error getting random bytes %w", err) + } + return key, nil +} + +// MarshalOrPanic serializes a protobuf message and panics if this +// operation fails +func MarshalOrPanic(pb proto.Message) []byte { + data, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + return data +} + +// ExtractConfigFromBlock extracts the config from a block +func ExtractConfigFromBlock(block *common.Block) (*common.Config, error) { + if block == nil { + return nil, errors.New("nil block") + } + + envelope, err := GetEnvelopeFromBlock(block.Data.Data[0]) + if err != nil { + return nil, err + } + + payload, err := UnmarshalPayload(envelope.Payload) + if err != nil { + return nil, err + } + + if payload.Header == nil || payload.Header.ChannelHeader == nil { + return nil, errors.New("bad header") + } + + chdr, err := UnmarshalChannelHeader(payload.Header.ChannelHeader) + if err != nil { + return nil, err + } + + if common.HeaderType(chdr.Type) != common.HeaderType_CONFIG { + return nil, errors.New("not a config block") + } + + configEnvelope := &common.ConfigEnvelope{} + if err := proto.Unmarshal(payload.Data, configEnvelope); err != nil { + return nil, err + } + + return configEnvelope.Config, nil +} + +// GetEnvelopeFromBlock gets an envelope from a block's data +func GetEnvelopeFromBlock(data []byte) (*common.Envelope, error) { + envelope := &common.Envelope{} + if err := proto.Unmarshal(data, envelope); err != nil { + return nil, err + } + return envelope, nil +} + +// FormSignedEnvelope creates a signed envelope from 
pre-existing signatures +func FormSignedEnvelope( + txType common.HeaderType, + channelID string, + dataMsg proto.Message, + signatures [][]byte, + msgVersion int32, + epoch uint64, +) (*common.Envelope, error) { + return FormSignedEnvelopeWithTLSBinding(txType, channelID, dataMsg, signatures, msgVersion, epoch, nil) +} + +// FormSignedEnvelopeWithTLSBinding creates a signed envelope from pre-existing signatures with TLS binding +func FormSignedEnvelopeWithTLSBinding( + txType common.HeaderType, + channelID string, + dataMsg proto.Message, + signatures [][]byte, + msgVersion int32, + epoch uint64, + tlsCertHash []byte, +) (*common.Envelope, error) { + // Create channel header + payloadChannelHeader := MakeChannelHeader(txType, msgVersion, channelID, epoch) + payloadChannelHeader.TlsCertHash = tlsCertHash + + // Marshal the data message + data, err := proto.Marshal(dataMsg) + if err != nil { + return nil, fmt.Errorf("error marshaling data message: %w", err) + } + + // Create a payload without signature header since we'll use pre-existing signatures + payload := &common.Payload{ + Header: &common.Header{ + ChannelHeader: MarshalOrPanic(payloadChannelHeader), + // No SignatureHeader here as we're using pre-existing signatures + }, + Data: data, + } + + payloadBytes := MarshalOrPanic(payload) + + // Create envelope with the payload + envelope := &common.Envelope{ + Payload: payloadBytes, + } + + // If signatures are provided, use the first one as the envelope signature + if len(signatures) > 0 { + envelope.Signature = signatures[0] + } + + return envelope, nil +} + +// CreateSignedEnvelope creates a signed envelope of the desired type, with +// marshaled dataMsg and signs it +func CreateSignedEnvelope( + txType common.HeaderType, + channelID string, + signer identity.SigningIdentity, + dataMsg proto.Message, + msgVersion int32, + epoch uint64, +) (*common.Envelope, error) { + return CreateSignedEnvelopeWithTLSBinding(txType, channelID, signer, dataMsg, msgVersion, 
epoch, nil) +} + +// CreateSignedEnvelopeWithTLSBinding creates a signed envelope of the desired +// type, with marshaled dataMsg and signs it. It also includes a TLS cert hash +// into the channel header +func CreateSignedEnvelopeWithTLSBinding( + txType common.HeaderType, + channelID string, + signer identity.SigningIdentity, + dataMsg proto.Message, + msgVersion int32, + epoch uint64, + tlsCertHash []byte, +) (*common.Envelope, error) { + payloadChannelHeader := MakeChannelHeader(txType, msgVersion, channelID, epoch) + payloadChannelHeader.TlsCertHash = tlsCertHash + var err error + payloadSignatureHeader := &common.SignatureHeader{} + + if signer != nil { + payloadSignatureHeader, err = NewSignatureHeader(signer) + if err != nil { + return nil, err + } + } + + data, err := proto.Marshal(dataMsg) + if err != nil { + return nil, fmt.Errorf("error marshaling %w", err) + } + + paylBytes := MarshalOrPanic( + &common.Payload{ + Header: MakePayloadHeader(payloadChannelHeader, payloadSignatureHeader), + Data: data, + }, + ) + + var sig []byte + if signer != nil { + sig, err = signer.Sign(paylBytes) + if err != nil { + return nil, err + } + } + + env := &common.Envelope{ + Payload: paylBytes, + Signature: sig, + } + + return env, nil +} + +// MakeChannelHeader creates a ChannelHeader. +func MakeChannelHeader(headerType common.HeaderType, version int32, chainID string, epoch uint64) *common.ChannelHeader { + return &common.ChannelHeader{ + Type: int32(headerType), + Version: version, + Timestamp: timestamppb.Now(), + ChannelId: chainID, + Epoch: epoch, + } +} + +// NewSignatureHeader returns a SignatureHeader with a valid nonce. 
+func NewSignatureHeader(id identity.Identity) (*common.SignatureHeader, error) { + serializedIdentity := &msp.SerializedIdentity{ + Mspid: id.MspID(), + IdBytes: id.Credentials(), + } + creator, err := proto.Marshal(serializedIdentity) + if err != nil { + return nil, err + } + nonce, err := CreateNonce() + if err != nil { + return nil, err + } + + return &common.SignatureHeader{ + Creator: creator, + Nonce: nonce, + }, nil +} + +func BlockDataHash(b *common.BlockData) []byte { + sum := sha256.Sum256(bytes.Join(b.Data, nil)) + return sum[:] +} + +// NewBlock constructs a block with no data and no metadata. +func NewBlock(seqNum uint64, previousHash []byte) *common.Block { + block := &common.Block{} + block.Header = &common.BlockHeader{} + block.Header.Number = seqNum + block.Header.PreviousHash = previousHash + block.Header.DataHash = []byte{} + block.Data = &common.BlockData{} + + var metadataContents [][]byte + for i := 0; i < len(common.BlockMetadataIndex_name); i++ { + metadataContents = append(metadataContents, []byte{}) + } + block.Metadata = &common.BlockMetadata{Metadata: metadataContents} + + return block +} + +// MakeSignatureHeader creates a SignatureHeader. +func MakeSignatureHeader(serializedCreatorCertChain []byte, nonce []byte) *common.SignatureHeader { + return &common.SignatureHeader{ + Creator: serializedCreatorCertChain, + Nonce: nonce, + } +} + +// SetTxID generates a transaction id based on the provided signature header +// and sets the TxId field in the channel header +func SetTxID(channelHeader *common.ChannelHeader, signatureHeader *common.SignatureHeader) { + channelHeader.TxId = ComputeTxID( + signatureHeader.Nonce, + signatureHeader.Creator, + ) +} + +// CreateNonceOrPanic generates a nonce using the common/crypto package +// and panics if this operation fails. 
+func CreateNonceOrPanic() []byte { + nonce, err := CreateNonce() + if err != nil { + panic(err) + } + return nonce +} + +// CreateNonce generates a nonce using the common/crypto package. +func CreateNonce() ([]byte, error) { + nonce, err := getRandomNonce() + return nonce, errors.Unwrap(fmt.Errorf("error generating random nonce %w", err)) +} + +// MakePayloadHeader creates a Payload Header. +func MakePayloadHeader(ch *common.ChannelHeader, sh *common.SignatureHeader) *common.Header { + return &common.Header{ + ChannelHeader: MarshalOrPanic(ch), + SignatureHeader: MarshalOrPanic(sh), + } +} + +// UnmarshalHeader unmarshals bytes to a Header +func UnmarshalHeader(bytes []byte) (*common.Header, error) { + hdr := &common.Header{} + err := proto.Unmarshal(bytes, hdr) + return hdr, errors.Unwrap(fmt.Errorf("error unmarshaling Header %w", err)) +} + +// UnmarshalChaincodeProposalPayload unmarshals bytes to a ChaincodeProposalPayload +func UnmarshalChaincodeProposalPayload(bytes []byte) (*peer.ChaincodeProposalPayload, error) { + cpp := &peer.ChaincodeProposalPayload{} + err := proto.Unmarshal(bytes, cpp) + return cpp, errors.Unwrap(fmt.Errorf("error unmarshaling ChaincodeProposalPayload %w", err)) +} + +// UnmarshalSignatureHeader unmarshals bytes to a SignatureHeader +func UnmarshalSignatureHeader(bytes []byte) (*common.SignatureHeader, error) { + sh := &common.SignatureHeader{} + err := proto.Unmarshal(bytes, sh) + return sh, errors.Unwrap(fmt.Errorf("error unmarshaling SignatureHeader %w", err)) +} + +// GetBytesProposalPayloadForTx takes a ChaincodeProposalPayload and returns +// its serialized version according to the visibility field +func GetBytesProposalPayloadForTx( + payload *peer.ChaincodeProposalPayload, +) ([]byte, error) { + // check for nil argument + if payload == nil { + return nil, errors.New("nil arguments") + } + + // strip the transient bytes off the payload + cppNoTransient := &peer.ChaincodeProposalPayload{Input: payload.Input, TransientMap: nil} + 
cppBytes, err := GetBytesChaincodeProposalPayload(cppNoTransient) + if err != nil { + return nil, err + } + + return cppBytes, nil +} + +// GetBytesChaincodeProposalPayload gets the chaincode proposal payload +func GetBytesChaincodeProposalPayload(cpp *peer.ChaincodeProposalPayload) ([]byte, error) { + cppBytes, err := proto.Marshal(cpp) + return cppBytes, errors.Unwrap(fmt.Errorf("error marshaling ChaincodeProposalPayload %w", err)) +} + +// GetBytesChaincodeActionPayload get the bytes of ChaincodeActionPayload from +// the message +func GetBytesChaincodeActionPayload(cap *peer.ChaincodeActionPayload) ([]byte, error) { + capBytes, err := proto.Marshal(cap) + return capBytes, errors.Unwrap(fmt.Errorf("error marshaling ChaincodeActionPayload %w", err)) +} + +// GetBytesTransaction get the bytes of Transaction from the message +func GetBytesTransaction(tx *peer.Transaction) ([]byte, error) { + bytes, err := proto.Marshal(tx) + return bytes, errors.Unwrap(fmt.Errorf("error unmarshaling Transaction %w", err)) +} + +// GetBytesPayload get the bytes of Payload from the message +func GetBytesPayload(payl *common.Payload) ([]byte, error) { + bytes, err := proto.Marshal(payl) + return bytes, errors.Unwrap(fmt.Errorf("error marshaling Payload %w", err)) +} diff --git a/internal/protoutil/unmarshalers.go b/internal/protoutil/unmarshalers.go new file mode 100644 index 0000000..92d9a4a --- /dev/null +++ b/internal/protoutil/unmarshalers.go @@ -0,0 +1,44 @@ +package protoutil + +import ( + "fmt" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + "google.golang.org/protobuf/proto" +) + +// UnmarshalEnvelope unmarshals bytes to a Envelope +func UnmarshalEnvelope(encoded []byte) (*cb.Envelope, error) { + envelope := &cb.Envelope{} + if err := proto.Unmarshal(encoded, envelope); err != nil { + return nil, fmt.Errorf("error unmarshaling Envelope: %w", err) + } + return envelope, nil +} + +// UnmarshalPayload unmarshals bytes to a Payload +func UnmarshalPayload(encoded 
[]byte) (*cb.Payload, error) { + payload := &cb.Payload{} + if err := proto.Unmarshal(encoded, payload); err != nil { + return nil, fmt.Errorf("error unmarshaling Payload: %w", err) + } + return payload, nil +} + +// UnmarshalChannelHeader unmarshals bytes to a ChannelHeader +func UnmarshalChannelHeader(bytes []byte) (*cb.ChannelHeader, error) { + chdr := &cb.ChannelHeader{} + if err := proto.Unmarshal(bytes, chdr); err != nil { + return nil, fmt.Errorf("error unmarshaling ChannelHeader: %w", err) + } + return chdr, nil +} + +// UnmarshalConfigUpdateEnvelope attempts to unmarshal bytes to a *cb.ConfigUpdate +func UnmarshalConfigUpdateEnvelope(data []byte) (*cb.ConfigUpdateEnvelope, error) { + configUpdateEnvelope := &cb.ConfigUpdateEnvelope{} + if err := proto.Unmarshal(data, configUpdateEnvelope); err != nil { + return nil, fmt.Errorf("error unmarshaling ConfigUpdateEnvelope: %w", err) + } + return configUpdateEnvelope, nil +} diff --git a/pkg/auth/service.go b/pkg/auth/service.go index 59d0325..4f8d2b6 100644 --- a/pkg/auth/service.go +++ b/pkg/auth/service.go @@ -6,7 +6,6 @@ import ( "database/sql" "encoding/base64" "fmt" - "sync" "time" "github.com/chainlaunch/chainlaunch/pkg/db" @@ -15,16 +14,13 @@ import ( // AuthService handles authentication operations type AuthService struct { - db *db.Queries - sessions map[string]*Session - mu sync.RWMutex + db *db.Queries } // NewAuthService creates a new authentication service func NewAuthService(db *db.Queries) *AuthService { return &AuthService{ - db: db, - sessions: make(map[string]*Session), + db: db, } } @@ -62,7 +58,7 @@ func (s *AuthService) InitializeDefaultUser() (string, error) { } // Create admin user - _, err = s.db.CreateUser(context.Background(), db.CreateUserParams{ + _, err = s.db.CreateUser(context.Background(), &db.CreateUserParams{ Username: "admin", Password: string(hashedPassword), }) @@ -105,7 +101,7 @@ func (s *AuthService) Login(ctx context.Context, username, password string) (*Se // Create 
session in database expiresAt := time.Now().Add(24 * time.Hour) // Sessions expire after 24 hours - dbSession, err := s.db.CreateSession(ctx, db.CreateSessionParams{ + dbSession, err := s.db.CreateSession(ctx, &db.CreateSessionParams{ SessionID: id, UserID: user.ID, Token: token, @@ -197,7 +193,7 @@ func (s *AuthService) CreateUser(ctx context.Context, username, password string) } // Create user - _, err = s.db.CreateUser(ctx, db.CreateUserParams{ + _, err = s.db.CreateUser(ctx, &db.CreateUserParams{ Username: username, Password: string(hashedPassword), }) @@ -230,7 +226,7 @@ func (s *AuthService) ListUsers(ctx context.Context) ([]*User, error) { // UpdateUser updates a user's details func (s *AuthService) UpdateUser(ctx context.Context, id int64, username, password string) error { - params := db.UpdateUserParams{ + params := &db.UpdateUserParams{ ID: id, Username: username, } @@ -276,3 +272,47 @@ func (s *AuthService) GetUserByID(ctx context.Context, id int64) (*User, error) LastLoginAt: dbUser.LastLoginAt.Time, }, nil } + +// UpdateUserPassword updates a user's password +func (s *AuthService) UpdateUserPassword(ctx context.Context, username, newPassword string) error { + // Get user from database + user, err := s.db.GetUserByUsername(ctx, username) + if err != nil { + if err == sql.ErrNoRows { + return fmt.Errorf("user not found") + } + return fmt.Errorf("failed to get user: %w", err) + } + + // Check if new password matches current password + if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(newPassword)); err == nil { + // Passwords match, no need to update + return nil + } + + // Hash new password + hashedPassword, err := bcrypt.GenerateFromPassword([]byte(newPassword), bcrypt.DefaultCost) + if err != nil { + return fmt.Errorf("failed to hash password: %w", err) + } + + // Update user's password + params := &db.UpdateUserParams{ + ID: user.ID, + Username: user.Username, + Column2: string(hashedPassword), + Password: string(hashedPassword), 
+ } + + _, err = s.db.UpdateUser(ctx, params) + if err != nil { + return fmt.Errorf("failed to update user password: %w", err) + } + + // Delete all existing sessions for this user for security + if err := s.db.DeleteUserSessions(ctx, user.ID); err != nil { + return fmt.Errorf("failed to delete user sessions: %w", err) + } + + return nil +} diff --git a/pkg/backups/http/handler.go b/pkg/backups/http/handler.go index cd54779..41dd2bb 100644 --- a/pkg/backups/http/handler.go +++ b/pkg/backups/http/handler.go @@ -55,7 +55,7 @@ func (h *Handler) RegisterRoutes(r chi.Router) { // CreateBackupTarget godoc // @Summary Create a new backup target // @Description Create a new backup target with the specified configuration -// @Tags backup-targets +// @Tags Backup Targets // @Accept json // @Produce json // @Param request body CreateBackupTargetRequest true "Backup target creation request" @@ -111,7 +111,7 @@ func (h *Handler) CreateBackupTarget(w http.ResponseWriter, r *http.Request) err // ListBackupTargets godoc // @Summary List all backup targets // @Description Get a list of all backup targets -// @Tags backup-targets +// @Tags Backup Targets // @Accept json // @Produce json // @Success 200 {array} BackupTargetResponse @@ -134,7 +134,7 @@ func (h *Handler) ListBackupTargets(w http.ResponseWriter, r *http.Request) erro // GetBackupTarget godoc // @Summary Get a backup target by ID // @Description Get detailed information about a specific backup target -// @Tags backup-targets +// @Tags Backup Targets // @Accept json // @Produce json // @Param id path int true "Backup Target ID" @@ -170,7 +170,7 @@ func (h *Handler) GetBackupTarget(w http.ResponseWriter, r *http.Request) error // DeleteBackupTarget godoc // @Summary Delete a backup target // @Description Delete a backup target and all associated backups -// @Tags backup-targets +// @Tags Backup Targets // @Accept json // @Produce json // @Param id path int true "Backup Target ID" @@ -205,7 +205,7 @@ func (h *Handler) 
DeleteBackupTarget(w http.ResponseWriter, r *http.Request) err // CreateBackupSchedule godoc // @Summary Create a new backup schedule // @Description Create a new backup schedule with the specified configuration -// @Tags backup-schedules +// @Tags Backup Schedules // @Accept json // @Produce json // @Param request body CreateBackupScheduleRequest true "Backup schedule creation request" @@ -258,7 +258,7 @@ func (h *Handler) CreateBackupSchedule(w http.ResponseWriter, r *http.Request) e // ListBackupSchedules godoc // @Summary List all backup schedules // @Description Get a list of all backup schedules -// @Tags backup-schedules +// @Tags Backup Schedules // @Accept json // @Produce json // @Success 200 {array} BackupScheduleResponse @@ -281,7 +281,7 @@ func (h *Handler) ListBackupSchedules(w http.ResponseWriter, r *http.Request) er // GetBackupSchedule godoc // @Summary Get a backup schedule by ID // @Description Get detailed information about a specific backup schedule -// @Tags backup-schedules +// @Tags Backup Schedules // @Accept json // @Produce json // @Param id path int true "Schedule ID" @@ -317,7 +317,7 @@ func (h *Handler) GetBackupSchedule(w http.ResponseWriter, r *http.Request) erro // EnableBackupSchedule godoc // @Summary Enable a backup schedule // @Description Enable a backup schedule to start running -// @Tags backup-schedules +// @Tags Backup Schedules // @Accept json // @Produce json // @Param id path int true "Schedule ID" @@ -353,7 +353,7 @@ func (h *Handler) EnableBackupSchedule(w http.ResponseWriter, r *http.Request) e // DisableBackupSchedule godoc // @Summary Disable a backup schedule // @Description Disable a backup schedule to stop it from running -// @Tags backup-schedules +// @Tags Backup Schedules // @Accept json // @Produce json // @Param id path int true "Schedule ID" @@ -389,7 +389,7 @@ func (h *Handler) DisableBackupSchedule(w http.ResponseWriter, r *http.Request) // DeleteBackupSchedule godoc // @Summary Delete a backup schedule 
// @Description Delete a backup schedule and stop its execution -// @Tags backup-schedules +// @Tags Backup Schedules // @Accept json // @Produce json // @Param id path int true "Schedule ID" @@ -424,7 +424,7 @@ func (h *Handler) DeleteBackupSchedule(w http.ResponseWriter, r *http.Request) e // ListBackups godoc // @Summary List all backups // @Description Get a list of all backups -// @Tags backups +// @Tags Backups // @Accept json // @Produce json // @Success 200 {array} BackupResponse @@ -447,7 +447,7 @@ func (h *Handler) ListBackups(w http.ResponseWriter, r *http.Request) error { // CreateBackup godoc // @Summary Create a new backup // @Description Create a new backup with the specified configuration -// @Tags backups +// @Tags Backups // @Accept json // @Produce json // @Param request body CreateBackupRequest true "Backup creation request" @@ -505,7 +505,7 @@ func (h *Handler) CreateBackup(w http.ResponseWriter, r *http.Request) error { // GetBackup godoc // @Summary Get a backup by ID // @Description Get detailed information about a specific backup -// @Tags backups +// @Tags Backups // @Accept json // @Produce json // @Param id path int true "Backup ID" @@ -541,7 +541,7 @@ func (h *Handler) GetBackup(w http.ResponseWriter, r *http.Request) error { // DeleteBackup godoc // @Summary Delete a backup // @Description Delete a backup and its associated files -// @Tags backups +// @Tags Backups // @Accept json // @Produce json // @Param id path int true "Backup ID" @@ -576,7 +576,7 @@ func (h *Handler) DeleteBackup(w http.ResponseWriter, r *http.Request) error { // UpdateBackupTarget godoc // @Summary Update a backup target // @Description Update an existing backup target with new configuration -// @Tags backup-targets +// @Tags Backup Targets // @Accept json // @Produce json // @Param id path int true "Backup Target ID" @@ -644,7 +644,7 @@ func (h *Handler) UpdateBackupTarget(w http.ResponseWriter, r *http.Request) err // UpdateBackupSchedule godoc // @Summary 
Update a backup schedule // @Description Update an existing backup schedule with new configuration -// @Tags backup-schedules +// @Tags Backup Schedules // @Accept json // @Produce json // @Param id path int true "Schedule ID" diff --git a/pkg/backups/service/service.go b/pkg/backups/service/service.go index d32468b..e81c5a8 100644 --- a/pkg/backups/service/service.go +++ b/pkg/backups/service/service.go @@ -18,6 +18,7 @@ import ( "crypto/rand" "encoding/base64" + "github.com/chainlaunch/chainlaunch/pkg/config" "github.com/chainlaunch/chainlaunch/pkg/db" "github.com/chainlaunch/chainlaunch/pkg/logger" "github.com/chainlaunch/chainlaunch/pkg/notifications" @@ -38,6 +39,7 @@ type BackupService struct { mu sync.Mutex stopCh chan struct{} databasePath string + configService *config.ConfigService } // NewBackupService creates a new backup service @@ -105,7 +107,7 @@ func (s *BackupService) CreateBackupTarget(ctx context.Context, params CreateBac return nil, fmt.Errorf("failed to generate restic password: %w", err) } - target, err := s.queries.CreateBackupTarget(ctx, db.CreateBackupTargetParams{ + target, err := s.queries.CreateBackupTarget(ctx, &db.CreateBackupTargetParams{ Name: params.Name, Type: string(params.Type), BucketName: sql.NullString{String: params.BucketName, Valid: params.BucketName != ""}, @@ -138,7 +140,7 @@ func (s *BackupService) CreateBackupTarget(ctx context.Context, params CreateBac // CreateBackupSchedule creates a new backup schedule func (s *BackupService) CreateBackupSchedule(ctx context.Context, params CreateBackupScheduleParams) (*BackupScheduleDTO, error) { - schedule, err := s.queries.CreateBackupSchedule(ctx, db.CreateBackupScheduleParams{ + schedule, err := s.queries.CreateBackupSchedule(ctx, &db.CreateBackupScheduleParams{ Name: params.Name, Description: sql.NullString{String: params.Description, Valid: params.Description != ""}, CronExpression: params.CronExpression, @@ -172,7 +174,7 @@ func (s *BackupService) CreateBackupSchedule(ctx 
context.Context, params CreateB // CreateBackup creates a new backup func (s *BackupService) CreateBackup(ctx context.Context, params CreateBackupParams) (*BackupDTO, error) { - backup, err := s.queries.CreateBackup(ctx, db.CreateBackupParams{ + backup, err := s.queries.CreateBackup(ctx, &db.CreateBackupParams{ ScheduleID: sql.NullInt64{Int64: *params.ScheduleID, Valid: params.ScheduleID != nil}, TargetID: params.TargetID, Status: string(BackupStatusPending), @@ -205,7 +207,7 @@ func (s *BackupService) TriggerBackup(ctx context.Context, sourceID int64, targe } // getResticRepoURL constructs the repository URL based on the target configuration -func (s *BackupService) getResticRepoURL(target db.BackupTarget) (string, error) { +func (s *BackupService) getResticRepoURL(target *db.BackupTarget) (string, error) { if !target.BucketName.Valid || !target.BucketPath.Valid { return "", fmt.Errorf("invalid bucket configuration") } @@ -279,7 +281,7 @@ func (s *BackupService) getBackupSize(env []string) (int64, error) { } // Update performS3Backup to include S3 connection issue notifications -func (s *BackupService) performS3Backup(ctx context.Context, backup db.Backup, target db.BackupTarget) error { +func (s *BackupService) performS3Backup(ctx context.Context, backup *db.Backup, target *db.BackupTarget) error { if !target.Endpoint.Valid || target.Endpoint.String == "" { return fmt.Errorf("backup configuration error: endpoint is required") } @@ -323,14 +325,8 @@ func (s *BackupService) performS3Backup(ctx context.Context, backup db.Backup, t return fmt.Errorf("failed to initialize restic repository: %w", err) } - // Get home directory - homeDir, err := os.UserHomeDir() - if err != nil { - return fmt.Errorf("system error: failed to get home directory: %w", err) - } - // Construct .chainlaunch path - chainlaunchPath := filepath.Join(homeDir, ".chainlaunch") + chainlaunchPath := s.configService.GetDataPath() // Check if directory exists if _, err := os.Stat(chainlaunchPath); 
os.IsNotExist(err) { @@ -460,7 +456,7 @@ func (s *BackupService) performS3Backup(ctx context.Context, backup db.Backup, t return fmt.Errorf("failed to get backup size: %w", err) } // Update backup size - _, err = s.queries.UpdateBackupSize(ctx, db.UpdateBackupSizeParams{ + _, err = s.queries.UpdateBackupSize(ctx, &db.UpdateBackupSizeParams{ ID: backup.ID, SizeBytes: sql.NullInt64{Int64: backupSize, Valid: true}, }) @@ -469,7 +465,7 @@ func (s *BackupService) performS3Backup(ctx context.Context, backup db.Backup, t } // notifyS3ConnectionIssue sends a notification for S3 connection issues -func (s *BackupService) notifyS3ConnectionIssue(ctx context.Context, target db.BackupTarget, errorMessage string) { +func (s *BackupService) notifyS3ConnectionIssue(ctx context.Context, target *db.BackupTarget, errorMessage string) { // Skip notification if notification service is not available if s.notificationService == nil { s.logger.Info("Notification service not available, skipping S3 connection issue notification") @@ -497,7 +493,7 @@ func (s *BackupService) notifyS3ConnectionIssue(ctx context.Context, target db.B // markBackupFailed marks a backup as failed with an error message func (s *BackupService) markBackupFailed(ctx context.Context, backupID int64, errorMessage string) { - s.queries.UpdateBackupFailed(ctx, db.UpdateBackupFailedParams{ + s.queries.UpdateBackupFailed(ctx, &db.UpdateBackupFailedParams{ ID: backupID, Status: string(BackupStatusFailed), ErrorMessage: sql.NullString{String: errorMessage, Valid: true}, @@ -506,7 +502,7 @@ func (s *BackupService) markBackupFailed(ctx context.Context, backupID int64, er } // scheduleBackup adds a backup schedule to the cron scheduler -func (s *BackupService) scheduleBackup(schedule db.BackupSchedule) { +func (s *BackupService) scheduleBackup(schedule *db.BackupSchedule) { s.mu.Lock() defer s.mu.Unlock() @@ -532,9 +528,9 @@ func (s *BackupService) scheduleBackup(schedule db.BackupSchedule) { } // createScheduledBackup creates 
a backup from a schedule -func (s *BackupService) createScheduledBackup(ctx context.Context, schedule db.BackupSchedule) { +func (s *BackupService) createScheduledBackup(ctx context.Context, schedule *db.BackupSchedule) { // Create backup entry - backup, err := s.queries.CreateBackup(ctx, db.CreateBackupParams{ + backup, err := s.queries.CreateBackup(ctx, &db.CreateBackupParams{ ScheduleID: sql.NullInt64{Int64: schedule.ID, Valid: true}, TargetID: schedule.TargetID, Status: string(BackupStatusPending), @@ -545,7 +541,7 @@ func (s *BackupService) createScheduledBackup(ctx context.Context, schedule db.B } // Update schedule's last run time - s.queries.UpdateBackupScheduleLastRun(ctx, db.UpdateBackupScheduleLastRunParams{ + s.queries.UpdateBackupScheduleLastRun(ctx, &db.UpdateBackupScheduleLastRunParams{ ID: schedule.ID, LastRunAt: sql.NullTime{Time: time.Now(), Valid: true}, }) @@ -555,11 +551,11 @@ func (s *BackupService) createScheduledBackup(ctx context.Context, schedule db.B } // performBackup executes the actual backup process -func (s *BackupService) performBackup(backup db.Backup) { +func (s *BackupService) performBackup(backup *db.Backup) { ctx := context.Background() // Update status to in progress - _, err := s.queries.UpdateBackupStatus(ctx, db.UpdateBackupStatusParams{ + _, err := s.queries.UpdateBackupStatus(ctx, &db.UpdateBackupStatusParams{ ID: backup.ID, Status: string(BackupStatusInProgress), }) @@ -600,7 +596,7 @@ func (s *BackupService) performBackup(backup db.Backup) { } // Mark backup as completed - updatedBackup, err := s.queries.UpdateBackupCompleted(ctx, db.UpdateBackupCompletedParams{ + updatedBackup, err := s.queries.UpdateBackupCompleted(ctx, &db.UpdateBackupCompletedParams{ ID: backup.ID, Status: string(BackupStatusCompleted), CompletedAt: sql.NullTime{Time: time.Now(), Valid: true}, @@ -781,7 +777,7 @@ func (s *BackupService) DeleteBackupSchedule(ctx context.Context, id int64) erro // ListBackups returns all backups func (s 
*BackupService) ListBackups(ctx context.Context) ([]*BackupDTO, error) { - backups, err := s.queries.ListBackups(ctx, db.ListBackupsParams{ + backups, err := s.queries.ListBackups(ctx, &db.ListBackupsParams{ Limit: 100, Offset: 0, }) @@ -861,7 +857,7 @@ func (s *BackupService) DeleteBackup(ctx context.Context, id int64) error { } // deleteBackupFile deletes the actual backup file from storage -func (s *BackupService) deleteBackupFile(ctx context.Context, backup db.Backup, target db.BackupTarget) error { +func (s *BackupService) deleteBackupFile(ctx context.Context, backup *db.Backup, target *db.BackupTarget) error { switch BackupTargetType(target.Type) { case BackupTargetTypeS3: return s.deleteS3BackupFile(ctx, backup, target) @@ -871,7 +867,7 @@ func (s *BackupService) deleteBackupFile(ctx context.Context, backup db.Backup, } // deleteS3BackupFile deletes a backup file from S3 using restic -func (s *BackupService) deleteS3BackupFile(ctx context.Context, backup db.Backup, target db.BackupTarget) error { +func (s *BackupService) deleteS3BackupFile(ctx context.Context, backup *db.Backup, target *db.BackupTarget) error { // Set up restic environment variables env := []string{ fmt.Sprintf("AWS_ACCESS_KEY_ID=%s", target.AccessKeyID.String), @@ -976,7 +972,7 @@ func (s *BackupService) UpdateBackupTarget(ctx context.Context, params UpdateBac } // Update the target - target, err := s.queries.UpdateBackupTarget(ctx, db.UpdateBackupTargetParams{ + target, err := s.queries.UpdateBackupTarget(ctx, &db.UpdateBackupTargetParams{ ID: params.ID, Name: params.Name, Type: string(params.Type), @@ -1022,7 +1018,7 @@ func (s *BackupService) UpdateBackupSchedule(ctx context.Context, params UpdateB } // Update the schedule - schedule, err := s.queries.UpdateBackupSchedule(ctx, db.UpdateBackupScheduleParams{ + schedule, err := s.queries.UpdateBackupSchedule(ctx, &db.UpdateBackupScheduleParams{ ID: params.ID, Name: params.Name, Description: sql.NullString{String: params.Description, Valid: 
params.Description != ""}, @@ -1082,7 +1078,7 @@ func (s *BackupService) UpdateBackupSchedule(ctx context.Context, params UpdateB } // notifyBackupSuccess sends a notification about a successful backup -func (s *BackupService) notifyBackupSuccess(ctx context.Context, backup db.Backup) { +func (s *BackupService) notifyBackupSuccess(ctx context.Context, backup *db.Backup) { // Skip notification if notification service is not available if s.notificationService == nil { s.logger.Info("Notification service not available, skipping backup success notification") @@ -1151,7 +1147,7 @@ func (s *BackupService) notifyBackupSuccess(ctx context.Context, backup db.Backu } // notifyBackupFailure sends a notification about a failed backup -func (s *BackupService) notifyBackupFailure(ctx context.Context, backup db.Backup, errorMessage string) { +func (s *BackupService) notifyBackupFailure(ctx context.Context, backup *db.Backup, errorMessage string) { // Skip notification if notification service is not available if s.notificationService == nil { s.logger.Info("Notification service not available, skipping backup failure notification") diff --git a/pkg/binaries/downloader.go b/pkg/binaries/downloader.go index 2d25f57..c1d3ae5 100644 --- a/pkg/binaries/downloader.go +++ b/pkg/binaries/downloader.go @@ -10,6 +10,8 @@ import ( "path/filepath" "runtime" "strings" + + "github.com/chainlaunch/chainlaunch/pkg/config" ) const ( @@ -28,16 +30,16 @@ const ( // BinaryDownloader handles downloading and managing Fabric binaries type BinaryDownloader struct { - homeDir string + configService *config.ConfigService } // NewBinaryDownloader creates a new BinaryDownloader instance -func NewBinaryDownloader(homeDir string) (*BinaryDownloader, error) { - binDir := filepath.Join(homeDir, ".chainlaunch", "bin") +func NewBinaryDownloader(configService *config.ConfigService) (*BinaryDownloader, error) { + binDir := filepath.Join(configService.GetDataPath(), "bin") if err := os.MkdirAll(binDir, 0755); err != 
nil { return nil, fmt.Errorf("failed to create binary directory: %w", err) } - return &BinaryDownloader{homeDir: homeDir}, nil + return &BinaryDownloader{configService: configService}, nil } // GetBinaryPath returns the path to the binary, downloading it if necessary @@ -46,7 +48,7 @@ func (d *BinaryDownloader) GetBinaryPath(binaryType BinaryType, version string) version = DefaultVersion } - binDir := filepath.Join(d.homeDir, ".chainlaunch", "bin") + binDir := filepath.Join(d.configService.GetDataPath(), "bin") binaryName := string(binaryType) if runtime.GOOS == "windows" { binaryName += ".exe" diff --git a/pkg/config/service.go b/pkg/config/service.go new file mode 100644 index 0000000..4e7b177 --- /dev/null +++ b/pkg/config/service.go @@ -0,0 +1,17 @@ +package config + +// ConfigService handles configuration paths and directories +type ConfigService struct { + dataPath string +} + +// NewConfigService creates a new ConfigService instance +func NewConfigService(dataPath string) *ConfigService { + return &ConfigService{ + dataPath: dataPath, + } +} + +func (s *ConfigService) GetDataPath() string { + return s.dataPath +} diff --git a/pkg/db/db.go b/pkg/db/db.go index 13c834d..faacf09 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -7,7 +7,6 @@ package db import ( "context" "database/sql" - "fmt" ) type DBTX interface { @@ -21,1358 +20,12 @@ func New(db DBTX) *Queries { return &Queries{db: db} } -func Prepare(ctx context.Context, db DBTX) (*Queries, error) { - q := Queries{db: db} - var err error - if q.checkNetworkNodeExistsStmt, err = db.PrepareContext(ctx, checkNetworkNodeExists); err != nil { - return nil, fmt.Errorf("error preparing query CheckNetworkNodeExists: %w", err) - } - if q.countBackupsByScheduleStmt, err = db.PrepareContext(ctx, countBackupsBySchedule); err != nil { - return nil, fmt.Errorf("error preparing query CountBackupsBySchedule: %w", err) - } - if q.countBackupsByTargetStmt, err = db.PrepareContext(ctx, countBackupsByTarget); err != nil { - 
return nil, fmt.Errorf("error preparing query CountBackupsByTarget: %w", err) - } - if q.countNetworksStmt, err = db.PrepareContext(ctx, countNetworks); err != nil { - return nil, fmt.Errorf("error preparing query CountNetworks: %w", err) - } - if q.countNodeEventsStmt, err = db.PrepareContext(ctx, countNodeEvents); err != nil { - return nil, fmt.Errorf("error preparing query CountNodeEvents: %w", err) - } - if q.countNodesStmt, err = db.PrepareContext(ctx, countNodes); err != nil { - return nil, fmt.Errorf("error preparing query CountNodes: %w", err) - } - if q.countNodesByPlatformStmt, err = db.PrepareContext(ctx, countNodesByPlatform); err != nil { - return nil, fmt.Errorf("error preparing query CountNodesByPlatform: %w", err) - } - if q.countUsersStmt, err = db.PrepareContext(ctx, countUsers); err != nil { - return nil, fmt.Errorf("error preparing query CountUsers: %w", err) - } - if q.createBackupStmt, err = db.PrepareContext(ctx, createBackup); err != nil { - return nil, fmt.Errorf("error preparing query CreateBackup: %w", err) - } - if q.createBackupScheduleStmt, err = db.PrepareContext(ctx, createBackupSchedule); err != nil { - return nil, fmt.Errorf("error preparing query CreateBackupSchedule: %w", err) - } - if q.createBackupTargetStmt, err = db.PrepareContext(ctx, createBackupTarget); err != nil { - return nil, fmt.Errorf("error preparing query CreateBackupTarget: %w", err) - } - if q.createFabricOrganizationStmt, err = db.PrepareContext(ctx, createFabricOrganization); err != nil { - return nil, fmt.Errorf("error preparing query CreateFabricOrganization: %w", err) - } - if q.createKeyStmt, err = db.PrepareContext(ctx, createKey); err != nil { - return nil, fmt.Errorf("error preparing query CreateKey: %w", err) - } - if q.createKeyProviderStmt, err = db.PrepareContext(ctx, createKeyProvider); err != nil { - return nil, fmt.Errorf("error preparing query CreateKeyProvider: %w", err) - } - if q.createNetworkStmt, err = db.PrepareContext(ctx, createNetwork); 
err != nil { - return nil, fmt.Errorf("error preparing query CreateNetwork: %w", err) - } - if q.createNetworkFullStmt, err = db.PrepareContext(ctx, createNetworkFull); err != nil { - return nil, fmt.Errorf("error preparing query CreateNetworkFull: %w", err) - } - if q.createNetworkNodeStmt, err = db.PrepareContext(ctx, createNetworkNode); err != nil { - return nil, fmt.Errorf("error preparing query CreateNetworkNode: %w", err) - } - if q.createNodeStmt, err = db.PrepareContext(ctx, createNode); err != nil { - return nil, fmt.Errorf("error preparing query CreateNode: %w", err) - } - if q.createNodeEventStmt, err = db.PrepareContext(ctx, createNodeEvent); err != nil { - return nil, fmt.Errorf("error preparing query CreateNodeEvent: %w", err) - } - if q.createNotificationProviderStmt, err = db.PrepareContext(ctx, createNotificationProvider); err != nil { - return nil, fmt.Errorf("error preparing query CreateNotificationProvider: %w", err) - } - if q.createSessionStmt, err = db.PrepareContext(ctx, createSession); err != nil { - return nil, fmt.Errorf("error preparing query CreateSession: %w", err) - } - if q.createUserStmt, err = db.PrepareContext(ctx, createUser); err != nil { - return nil, fmt.Errorf("error preparing query CreateUser: %w", err) - } - if q.deleteBackupStmt, err = db.PrepareContext(ctx, deleteBackup); err != nil { - return nil, fmt.Errorf("error preparing query DeleteBackup: %w", err) - } - if q.deleteBackupScheduleStmt, err = db.PrepareContext(ctx, deleteBackupSchedule); err != nil { - return nil, fmt.Errorf("error preparing query DeleteBackupSchedule: %w", err) - } - if q.deleteBackupTargetStmt, err = db.PrepareContext(ctx, deleteBackupTarget); err != nil { - return nil, fmt.Errorf("error preparing query DeleteBackupTarget: %w", err) - } - if q.deleteBackupsByScheduleStmt, err = db.PrepareContext(ctx, deleteBackupsBySchedule); err != nil { - return nil, fmt.Errorf("error preparing query DeleteBackupsBySchedule: %w", err) - } - if 
q.deleteBackupsByTargetStmt, err = db.PrepareContext(ctx, deleteBackupsByTarget); err != nil { - return nil, fmt.Errorf("error preparing query DeleteBackupsByTarget: %w", err) - } - if q.deleteExpiredSessionsStmt, err = db.PrepareContext(ctx, deleteExpiredSessions); err != nil { - return nil, fmt.Errorf("error preparing query DeleteExpiredSessions: %w", err) - } - if q.deleteFabricOrganizationStmt, err = db.PrepareContext(ctx, deleteFabricOrganization); err != nil { - return nil, fmt.Errorf("error preparing query DeleteFabricOrganization: %w", err) - } - if q.deleteKeyStmt, err = db.PrepareContext(ctx, deleteKey); err != nil { - return nil, fmt.Errorf("error preparing query DeleteKey: %w", err) - } - if q.deleteKeyProviderStmt, err = db.PrepareContext(ctx, deleteKeyProvider); err != nil { - return nil, fmt.Errorf("error preparing query DeleteKeyProvider: %w", err) - } - if q.deleteNetworkStmt, err = db.PrepareContext(ctx, deleteNetwork); err != nil { - return nil, fmt.Errorf("error preparing query DeleteNetwork: %w", err) - } - if q.deleteNetworkNodeStmt, err = db.PrepareContext(ctx, deleteNetworkNode); err != nil { - return nil, fmt.Errorf("error preparing query DeleteNetworkNode: %w", err) - } - if q.deleteNodeStmt, err = db.PrepareContext(ctx, deleteNode); err != nil { - return nil, fmt.Errorf("error preparing query DeleteNode: %w", err) - } - if q.deleteNotificationProviderStmt, err = db.PrepareContext(ctx, deleteNotificationProvider); err != nil { - return nil, fmt.Errorf("error preparing query DeleteNotificationProvider: %w", err) - } - if q.deleteOldBackupsStmt, err = db.PrepareContext(ctx, deleteOldBackups); err != nil { - return nil, fmt.Errorf("error preparing query DeleteOldBackups: %w", err) - } - if q.deleteSessionStmt, err = db.PrepareContext(ctx, deleteSession); err != nil { - return nil, fmt.Errorf("error preparing query DeleteSession: %w", err) - } - if q.deleteUserStmt, err = db.PrepareContext(ctx, deleteUser); err != nil { - return nil, 
fmt.Errorf("error preparing query DeleteUser: %w", err) - } - if q.deleteUserSessionsStmt, err = db.PrepareContext(ctx, deleteUserSessions); err != nil { - return nil, fmt.Errorf("error preparing query DeleteUserSessions: %w", err) - } - if q.disableBackupScheduleStmt, err = db.PrepareContext(ctx, disableBackupSchedule); err != nil { - return nil, fmt.Errorf("error preparing query DisableBackupSchedule: %w", err) - } - if q.enableBackupScheduleStmt, err = db.PrepareContext(ctx, enableBackupSchedule); err != nil { - return nil, fmt.Errorf("error preparing query EnableBackupSchedule: %w", err) - } - if q.getAllKeysStmt, err = db.PrepareContext(ctx, getAllKeys); err != nil { - return nil, fmt.Errorf("error preparing query GetAllKeys: %w", err) - } - if q.getAllNodesStmt, err = db.PrepareContext(ctx, getAllNodes); err != nil { - return nil, fmt.Errorf("error preparing query GetAllNodes: %w", err) - } - if q.getBackupStmt, err = db.PrepareContext(ctx, getBackup); err != nil { - return nil, fmt.Errorf("error preparing query GetBackup: %w", err) - } - if q.getBackupScheduleStmt, err = db.PrepareContext(ctx, getBackupSchedule); err != nil { - return nil, fmt.Errorf("error preparing query GetBackupSchedule: %w", err) - } - if q.getBackupTargetStmt, err = db.PrepareContext(ctx, getBackupTarget); err != nil { - return nil, fmt.Errorf("error preparing query GetBackupTarget: %w", err) - } - if q.getBackupsByDateRangeStmt, err = db.PrepareContext(ctx, getBackupsByDateRange); err != nil { - return nil, fmt.Errorf("error preparing query GetBackupsByDateRange: %w", err) - } - if q.getBackupsByScheduleAndStatusStmt, err = db.PrepareContext(ctx, getBackupsByScheduleAndStatus); err != nil { - return nil, fmt.Errorf("error preparing query GetBackupsByScheduleAndStatus: %w", err) - } - if q.getBackupsByStatusStmt, err = db.PrepareContext(ctx, getBackupsByStatus); err != nil { - return nil, fmt.Errorf("error preparing query GetBackupsByStatus: %w", err) - } - if 
q.getDefaultNotificationProviderStmt, err = db.PrepareContext(ctx, getDefaultNotificationProvider); err != nil { - return nil, fmt.Errorf("error preparing query GetDefaultNotificationProvider: %w", err) - } - if q.getDefaultNotificationProviderForTypeStmt, err = db.PrepareContext(ctx, getDefaultNotificationProviderForType); err != nil { - return nil, fmt.Errorf("error preparing query GetDefaultNotificationProviderForType: %w", err) - } - if q.getFabricOrganizationStmt, err = db.PrepareContext(ctx, getFabricOrganization); err != nil { - return nil, fmt.Errorf("error preparing query GetFabricOrganization: %w", err) - } - if q.getFabricOrganizationByIDStmt, err = db.PrepareContext(ctx, getFabricOrganizationByID); err != nil { - return nil, fmt.Errorf("error preparing query GetFabricOrganizationByID: %w", err) - } - if q.getFabricOrganizationByMSPIDStmt, err = db.PrepareContext(ctx, getFabricOrganizationByMSPID); err != nil { - return nil, fmt.Errorf("error preparing query GetFabricOrganizationByMSPID: %w", err) - } - if q.getFabricOrganizationByMspIDStmt, err = db.PrepareContext(ctx, getFabricOrganizationByMspID); err != nil { - return nil, fmt.Errorf("error preparing query GetFabricOrganizationByMspID: %w", err) - } - if q.getFabricOrganizationWithKeysStmt, err = db.PrepareContext(ctx, getFabricOrganizationWithKeys); err != nil { - return nil, fmt.Errorf("error preparing query GetFabricOrganizationWithKeys: %w", err) - } - if q.getKeyStmt, err = db.PrepareContext(ctx, getKey); err != nil { - return nil, fmt.Errorf("error preparing query GetKey: %w", err) - } - if q.getKeyByEthereumAddressStmt, err = db.PrepareContext(ctx, getKeyByEthereumAddress); err != nil { - return nil, fmt.Errorf("error preparing query GetKeyByEthereumAddress: %w", err) - } - if q.getKeyByIDStmt, err = db.PrepareContext(ctx, getKeyByID); err != nil { - return nil, fmt.Errorf("error preparing query GetKeyByID: %w", err) - } - if q.getKeyCountByProviderStmt, err = db.PrepareContext(ctx, 
getKeyCountByProvider); err != nil { - return nil, fmt.Errorf("error preparing query GetKeyCountByProvider: %w", err) - } - if q.getKeyProviderStmt, err = db.PrepareContext(ctx, getKeyProvider); err != nil { - return nil, fmt.Errorf("error preparing query GetKeyProvider: %w", err) - } - if q.getKeyProviderByDefaultStmt, err = db.PrepareContext(ctx, getKeyProviderByDefault); err != nil { - return nil, fmt.Errorf("error preparing query GetKeyProviderByDefault: %w", err) - } - if q.getKeyProviderByIDStmt, err = db.PrepareContext(ctx, getKeyProviderByID); err != nil { - return nil, fmt.Errorf("error preparing query GetKeyProviderByID: %w", err) - } - if q.getKeysByAlgorithmStmt, err = db.PrepareContext(ctx, getKeysByAlgorithm); err != nil { - return nil, fmt.Errorf("error preparing query GetKeysByAlgorithm: %w", err) - } - if q.getKeysByProviderAndCurveStmt, err = db.PrepareContext(ctx, getKeysByProviderAndCurve); err != nil { - return nil, fmt.Errorf("error preparing query GetKeysByProviderAndCurve: %w", err) - } - if q.getKeysCountStmt, err = db.PrepareContext(ctx, getKeysCount); err != nil { - return nil, fmt.Errorf("error preparing query GetKeysCount: %w", err) - } - if q.getLatestNodeEventStmt, err = db.PrepareContext(ctx, getLatestNodeEvent); err != nil { - return nil, fmt.Errorf("error preparing query GetLatestNodeEvent: %w", err) - } - if q.getNetworkStmt, err = db.PrepareContext(ctx, getNetwork); err != nil { - return nil, fmt.Errorf("error preparing query GetNetwork: %w", err) - } - if q.getNetworkByNameStmt, err = db.PrepareContext(ctx, getNetworkByName); err != nil { - return nil, fmt.Errorf("error preparing query GetNetworkByName: %w", err) - } - if q.getNetworkByNetworkIdStmt, err = db.PrepareContext(ctx, getNetworkByNetworkId); err != nil { - return nil, fmt.Errorf("error preparing query GetNetworkByNetworkId: %w", err) - } - if q.getNetworkCurrentConfigBlockStmt, err = db.PrepareContext(ctx, getNetworkCurrentConfigBlock); err != nil { - return nil, 
fmt.Errorf("error preparing query GetNetworkCurrentConfigBlock: %w", err) - } - if q.getNetworkNodeStmt, err = db.PrepareContext(ctx, getNetworkNode); err != nil { - return nil, fmt.Errorf("error preparing query GetNetworkNode: %w", err) - } - if q.getNetworkNodesStmt, err = db.PrepareContext(ctx, getNetworkNodes); err != nil { - return nil, fmt.Errorf("error preparing query GetNetworkNodes: %w", err) - } - if q.getNodeStmt, err = db.PrepareContext(ctx, getNode); err != nil { - return nil, fmt.Errorf("error preparing query GetNode: %w", err) - } - if q.getNodeBySlugStmt, err = db.PrepareContext(ctx, getNodeBySlug); err != nil { - return nil, fmt.Errorf("error preparing query GetNodeBySlug: %w", err) - } - if q.getNodeEventStmt, err = db.PrepareContext(ctx, getNodeEvent); err != nil { - return nil, fmt.Errorf("error preparing query GetNodeEvent: %w", err) - } - if q.getNotificationProviderStmt, err = db.PrepareContext(ctx, getNotificationProvider); err != nil { - return nil, fmt.Errorf("error preparing query GetNotificationProvider: %w", err) - } - if q.getOldestBackupByTargetStmt, err = db.PrepareContext(ctx, getOldestBackupByTarget); err != nil { - return nil, fmt.Errorf("error preparing query GetOldestBackupByTarget: %w", err) - } - if q.getOrdererPortsStmt, err = db.PrepareContext(ctx, getOrdererPorts); err != nil { - return nil, fmt.Errorf("error preparing query GetOrdererPorts: %w", err) - } - if q.getPeerPortsStmt, err = db.PrepareContext(ctx, getPeerPorts); err != nil { - return nil, fmt.Errorf("error preparing query GetPeerPorts: %w", err) - } - if q.getProvidersByNotificationTypeStmt, err = db.PrepareContext(ctx, getProvidersByNotificationType); err != nil { - return nil, fmt.Errorf("error preparing query GetProvidersByNotificationType: %w", err) - } - if q.getRecentCompletedBackupsStmt, err = db.PrepareContext(ctx, getRecentCompletedBackups); err != nil { - return nil, fmt.Errorf("error preparing query GetRecentCompletedBackups: %w", err) - } - if 
q.getSessionStmt, err = db.PrepareContext(ctx, getSession); err != nil { - return nil, fmt.Errorf("error preparing query GetSession: %w", err) - } - if q.getUserStmt, err = db.PrepareContext(ctx, getUser); err != nil { - return nil, fmt.Errorf("error preparing query GetUser: %w", err) - } - if q.getUserByUsernameStmt, err = db.PrepareContext(ctx, getUserByUsername); err != nil { - return nil, fmt.Errorf("error preparing query GetUserByUsername: %w", err) - } - if q.listBackupSchedulesStmt, err = db.PrepareContext(ctx, listBackupSchedules); err != nil { - return nil, fmt.Errorf("error preparing query ListBackupSchedules: %w", err) - } - if q.listBackupTargetsStmt, err = db.PrepareContext(ctx, listBackupTargets); err != nil { - return nil, fmt.Errorf("error preparing query ListBackupTargets: %w", err) - } - if q.listBackupsStmt, err = db.PrepareContext(ctx, listBackups); err != nil { - return nil, fmt.Errorf("error preparing query ListBackups: %w", err) - } - if q.listBackupsByScheduleStmt, err = db.PrepareContext(ctx, listBackupsBySchedule); err != nil { - return nil, fmt.Errorf("error preparing query ListBackupsBySchedule: %w", err) - } - if q.listBackupsByTargetStmt, err = db.PrepareContext(ctx, listBackupsByTarget); err != nil { - return nil, fmt.Errorf("error preparing query ListBackupsByTarget: %w", err) - } - if q.listFabricOrganizationsStmt, err = db.PrepareContext(ctx, listFabricOrganizations); err != nil { - return nil, fmt.Errorf("error preparing query ListFabricOrganizations: %w", err) - } - if q.listFabricOrganizationsWithKeysStmt, err = db.PrepareContext(ctx, listFabricOrganizationsWithKeys); err != nil { - return nil, fmt.Errorf("error preparing query ListFabricOrganizationsWithKeys: %w", err) - } - if q.listKeyProvidersStmt, err = db.PrepareContext(ctx, listKeyProviders); err != nil { - return nil, fmt.Errorf("error preparing query ListKeyProviders: %w", err) - } - if q.listKeysStmt, err = db.PrepareContext(ctx, listKeys); err != nil { - return nil, 
fmt.Errorf("error preparing query ListKeys: %w", err) - } - if q.listNetworkNodesByNetworkStmt, err = db.PrepareContext(ctx, listNetworkNodesByNetwork); err != nil { - return nil, fmt.Errorf("error preparing query ListNetworkNodesByNetwork: %w", err) - } - if q.listNetworkNodesByNodeStmt, err = db.PrepareContext(ctx, listNetworkNodesByNode); err != nil { - return nil, fmt.Errorf("error preparing query ListNetworkNodesByNode: %w", err) - } - if q.listNetworksStmt, err = db.PrepareContext(ctx, listNetworks); err != nil { - return nil, fmt.Errorf("error preparing query ListNetworks: %w", err) - } - if q.listNodeEventsStmt, err = db.PrepareContext(ctx, listNodeEvents); err != nil { - return nil, fmt.Errorf("error preparing query ListNodeEvents: %w", err) - } - if q.listNodeEventsByTypeStmt, err = db.PrepareContext(ctx, listNodeEventsByType); err != nil { - return nil, fmt.Errorf("error preparing query ListNodeEventsByType: %w", err) - } - if q.listNodesStmt, err = db.PrepareContext(ctx, listNodes); err != nil { - return nil, fmt.Errorf("error preparing query ListNodes: %w", err) - } - if q.listNodesByNetworkStmt, err = db.PrepareContext(ctx, listNodesByNetwork); err != nil { - return nil, fmt.Errorf("error preparing query ListNodesByNetwork: %w", err) - } - if q.listNodesByPlatformStmt, err = db.PrepareContext(ctx, listNodesByPlatform); err != nil { - return nil, fmt.Errorf("error preparing query ListNodesByPlatform: %w", err) - } - if q.listNotificationProvidersStmt, err = db.PrepareContext(ctx, listNotificationProviders); err != nil { - return nil, fmt.Errorf("error preparing query ListNotificationProviders: %w", err) - } - if q.listUsersStmt, err = db.PrepareContext(ctx, listUsers); err != nil { - return nil, fmt.Errorf("error preparing query ListUsers: %w", err) - } - if q.markBackupNotifiedStmt, err = db.PrepareContext(ctx, markBackupNotified); err != nil { - return nil, fmt.Errorf("error preparing query MarkBackupNotified: %w", err) - } - if 
q.unsetDefaultNotificationProviderStmt, err = db.PrepareContext(ctx, unsetDefaultNotificationProvider); err != nil { - return nil, fmt.Errorf("error preparing query UnsetDefaultNotificationProvider: %w", err) - } - if q.unsetDefaultProviderStmt, err = db.PrepareContext(ctx, unsetDefaultProvider); err != nil { - return nil, fmt.Errorf("error preparing query UnsetDefaultProvider: %w", err) - } - if q.updateBackupCompletedStmt, err = db.PrepareContext(ctx, updateBackupCompleted); err != nil { - return nil, fmt.Errorf("error preparing query UpdateBackupCompleted: %w", err) - } - if q.updateBackupFailedStmt, err = db.PrepareContext(ctx, updateBackupFailed); err != nil { - return nil, fmt.Errorf("error preparing query UpdateBackupFailed: %w", err) - } - if q.updateBackupScheduleStmt, err = db.PrepareContext(ctx, updateBackupSchedule); err != nil { - return nil, fmt.Errorf("error preparing query UpdateBackupSchedule: %w", err) - } - if q.updateBackupScheduleLastRunStmt, err = db.PrepareContext(ctx, updateBackupScheduleLastRun); err != nil { - return nil, fmt.Errorf("error preparing query UpdateBackupScheduleLastRun: %w", err) - } - if q.updateBackupSizeStmt, err = db.PrepareContext(ctx, updateBackupSize); err != nil { - return nil, fmt.Errorf("error preparing query UpdateBackupSize: %w", err) - } - if q.updateBackupStatusStmt, err = db.PrepareContext(ctx, updateBackupStatus); err != nil { - return nil, fmt.Errorf("error preparing query UpdateBackupStatus: %w", err) - } - if q.updateBackupTargetStmt, err = db.PrepareContext(ctx, updateBackupTarget); err != nil { - return nil, fmt.Errorf("error preparing query UpdateBackupTarget: %w", err) - } - if q.updateFabricOrganizationStmt, err = db.PrepareContext(ctx, updateFabricOrganization); err != nil { - return nil, fmt.Errorf("error preparing query UpdateFabricOrganization: %w", err) - } - if q.updateKeyStmt, err = db.PrepareContext(ctx, updateKey); err != nil { - return nil, fmt.Errorf("error preparing query UpdateKey: %w", 
err) - } - if q.updateKeyProviderStmt, err = db.PrepareContext(ctx, updateKeyProvider); err != nil { - return nil, fmt.Errorf("error preparing query UpdateKeyProvider: %w", err) - } - if q.updateNetworkCurrentConfigBlockStmt, err = db.PrepareContext(ctx, updateNetworkCurrentConfigBlock); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNetworkCurrentConfigBlock: %w", err) - } - if q.updateNetworkGenesisBlockStmt, err = db.PrepareContext(ctx, updateNetworkGenesisBlock); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNetworkGenesisBlock: %w", err) - } - if q.updateNetworkNodeRoleStmt, err = db.PrepareContext(ctx, updateNetworkNodeRole); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNetworkNodeRole: %w", err) - } - if q.updateNetworkNodeStatusStmt, err = db.PrepareContext(ctx, updateNetworkNodeStatus); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNetworkNodeStatus: %w", err) - } - if q.updateNetworkStatusStmt, err = db.PrepareContext(ctx, updateNetworkStatus); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNetworkStatus: %w", err) - } - if q.updateNodeDeploymentConfigStmt, err = db.PrepareContext(ctx, updateNodeDeploymentConfig); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNodeDeploymentConfig: %w", err) - } - if q.updateNodeEndpointStmt, err = db.PrepareContext(ctx, updateNodeEndpoint); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNodeEndpoint: %w", err) - } - if q.updateNodePublicEndpointStmt, err = db.PrepareContext(ctx, updateNodePublicEndpoint); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNodePublicEndpoint: %w", err) - } - if q.updateNodeStatusStmt, err = db.PrepareContext(ctx, updateNodeStatus); err != nil { - return nil, fmt.Errorf("error preparing query UpdateNodeStatus: %w", err) - } - if q.updateNotificationProviderStmt, err = db.PrepareContext(ctx, updateNotificationProvider); err != nil { 
- return nil, fmt.Errorf("error preparing query UpdateNotificationProvider: %w", err) - } - if q.updateProviderTestResultsStmt, err = db.PrepareContext(ctx, updateProviderTestResults); err != nil { - return nil, fmt.Errorf("error preparing query UpdateProviderTestResults: %w", err) - } - if q.updateUserStmt, err = db.PrepareContext(ctx, updateUser); err != nil { - return nil, fmt.Errorf("error preparing query UpdateUser: %w", err) - } - if q.updateUserLastLoginStmt, err = db.PrepareContext(ctx, updateUserLastLogin); err != nil { - return nil, fmt.Errorf("error preparing query UpdateUserLastLogin: %w", err) - } - return &q, nil -} - -func (q *Queries) Close() error { - var err error - if q.checkNetworkNodeExistsStmt != nil { - if cerr := q.checkNetworkNodeExistsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing checkNetworkNodeExistsStmt: %w", cerr) - } - } - if q.countBackupsByScheduleStmt != nil { - if cerr := q.countBackupsByScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing countBackupsByScheduleStmt: %w", cerr) - } - } - if q.countBackupsByTargetStmt != nil { - if cerr := q.countBackupsByTargetStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing countBackupsByTargetStmt: %w", cerr) - } - } - if q.countNetworksStmt != nil { - if cerr := q.countNetworksStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing countNetworksStmt: %w", cerr) - } - } - if q.countNodeEventsStmt != nil { - if cerr := q.countNodeEventsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing countNodeEventsStmt: %w", cerr) - } - } - if q.countNodesStmt != nil { - if cerr := q.countNodesStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing countNodesStmt: %w", cerr) - } - } - if q.countNodesByPlatformStmt != nil { - if cerr := q.countNodesByPlatformStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing countNodesByPlatformStmt: %w", cerr) - } - } - if q.countUsersStmt != nil { - if cerr := q.countUsersStmt.Close(); cerr != nil 
{ - err = fmt.Errorf("error closing countUsersStmt: %w", cerr) - } - } - if q.createBackupStmt != nil { - if cerr := q.createBackupStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createBackupStmt: %w", cerr) - } - } - if q.createBackupScheduleStmt != nil { - if cerr := q.createBackupScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createBackupScheduleStmt: %w", cerr) - } - } - if q.createBackupTargetStmt != nil { - if cerr := q.createBackupTargetStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createBackupTargetStmt: %w", cerr) - } - } - if q.createFabricOrganizationStmt != nil { - if cerr := q.createFabricOrganizationStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createFabricOrganizationStmt: %w", cerr) - } - } - if q.createKeyStmt != nil { - if cerr := q.createKeyStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createKeyStmt: %w", cerr) - } - } - if q.createKeyProviderStmt != nil { - if cerr := q.createKeyProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createKeyProviderStmt: %w", cerr) - } - } - if q.createNetworkStmt != nil { - if cerr := q.createNetworkStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createNetworkStmt: %w", cerr) - } - } - if q.createNetworkFullStmt != nil { - if cerr := q.createNetworkFullStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createNetworkFullStmt: %w", cerr) - } - } - if q.createNetworkNodeStmt != nil { - if cerr := q.createNetworkNodeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createNetworkNodeStmt: %w", cerr) - } - } - if q.createNodeStmt != nil { - if cerr := q.createNodeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createNodeStmt: %w", cerr) - } - } - if q.createNodeEventStmt != nil { - if cerr := q.createNodeEventStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createNodeEventStmt: %w", cerr) - } - } - if q.createNotificationProviderStmt != nil { - if cerr := 
q.createNotificationProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createNotificationProviderStmt: %w", cerr) - } - } - if q.createSessionStmt != nil { - if cerr := q.createSessionStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createSessionStmt: %w", cerr) - } - } - if q.createUserStmt != nil { - if cerr := q.createUserStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing createUserStmt: %w", cerr) - } - } - if q.deleteBackupStmt != nil { - if cerr := q.deleteBackupStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteBackupStmt: %w", cerr) - } - } - if q.deleteBackupScheduleStmt != nil { - if cerr := q.deleteBackupScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteBackupScheduleStmt: %w", cerr) - } - } - if q.deleteBackupTargetStmt != nil { - if cerr := q.deleteBackupTargetStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteBackupTargetStmt: %w", cerr) - } - } - if q.deleteBackupsByScheduleStmt != nil { - if cerr := q.deleteBackupsByScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteBackupsByScheduleStmt: %w", cerr) - } - } - if q.deleteBackupsByTargetStmt != nil { - if cerr := q.deleteBackupsByTargetStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteBackupsByTargetStmt: %w", cerr) - } - } - if q.deleteExpiredSessionsStmt != nil { - if cerr := q.deleteExpiredSessionsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteExpiredSessionsStmt: %w", cerr) - } - } - if q.deleteFabricOrganizationStmt != nil { - if cerr := q.deleteFabricOrganizationStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteFabricOrganizationStmt: %w", cerr) - } - } - if q.deleteKeyStmt != nil { - if cerr := q.deleteKeyStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteKeyStmt: %w", cerr) - } - } - if q.deleteKeyProviderStmt != nil { - if cerr := q.deleteKeyProviderStmt.Close(); cerr != nil { - err = 
fmt.Errorf("error closing deleteKeyProviderStmt: %w", cerr) - } - } - if q.deleteNetworkStmt != nil { - if cerr := q.deleteNetworkStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteNetworkStmt: %w", cerr) - } - } - if q.deleteNetworkNodeStmt != nil { - if cerr := q.deleteNetworkNodeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteNetworkNodeStmt: %w", cerr) - } - } - if q.deleteNodeStmt != nil { - if cerr := q.deleteNodeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteNodeStmt: %w", cerr) - } - } - if q.deleteNotificationProviderStmt != nil { - if cerr := q.deleteNotificationProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteNotificationProviderStmt: %w", cerr) - } - } - if q.deleteOldBackupsStmt != nil { - if cerr := q.deleteOldBackupsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteOldBackupsStmt: %w", cerr) - } - } - if q.deleteSessionStmt != nil { - if cerr := q.deleteSessionStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteSessionStmt: %w", cerr) - } - } - if q.deleteUserStmt != nil { - if cerr := q.deleteUserStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteUserStmt: %w", cerr) - } - } - if q.deleteUserSessionsStmt != nil { - if cerr := q.deleteUserSessionsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing deleteUserSessionsStmt: %w", cerr) - } - } - if q.disableBackupScheduleStmt != nil { - if cerr := q.disableBackupScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing disableBackupScheduleStmt: %w", cerr) - } - } - if q.enableBackupScheduleStmt != nil { - if cerr := q.enableBackupScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing enableBackupScheduleStmt: %w", cerr) - } - } - if q.getAllKeysStmt != nil { - if cerr := q.getAllKeysStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getAllKeysStmt: %w", cerr) - } - } - if q.getAllNodesStmt != nil { - if cerr := 
q.getAllNodesStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getAllNodesStmt: %w", cerr) - } - } - if q.getBackupStmt != nil { - if cerr := q.getBackupStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getBackupStmt: %w", cerr) - } - } - if q.getBackupScheduleStmt != nil { - if cerr := q.getBackupScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getBackupScheduleStmt: %w", cerr) - } - } - if q.getBackupTargetStmt != nil { - if cerr := q.getBackupTargetStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getBackupTargetStmt: %w", cerr) - } - } - if q.getBackupsByDateRangeStmt != nil { - if cerr := q.getBackupsByDateRangeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getBackupsByDateRangeStmt: %w", cerr) - } - } - if q.getBackupsByScheduleAndStatusStmt != nil { - if cerr := q.getBackupsByScheduleAndStatusStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getBackupsByScheduleAndStatusStmt: %w", cerr) - } - } - if q.getBackupsByStatusStmt != nil { - if cerr := q.getBackupsByStatusStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getBackupsByStatusStmt: %w", cerr) - } - } - if q.getDefaultNotificationProviderStmt != nil { - if cerr := q.getDefaultNotificationProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getDefaultNotificationProviderStmt: %w", cerr) - } - } - if q.getDefaultNotificationProviderForTypeStmt != nil { - if cerr := q.getDefaultNotificationProviderForTypeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getDefaultNotificationProviderForTypeStmt: %w", cerr) - } - } - if q.getFabricOrganizationStmt != nil { - if cerr := q.getFabricOrganizationStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getFabricOrganizationStmt: %w", cerr) - } - } - if q.getFabricOrganizationByIDStmt != nil { - if cerr := q.getFabricOrganizationByIDStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getFabricOrganizationByIDStmt: %w", cerr) 
- } - } - if q.getFabricOrganizationByMSPIDStmt != nil { - if cerr := q.getFabricOrganizationByMSPIDStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getFabricOrganizationByMSPIDStmt: %w", cerr) - } - } - if q.getFabricOrganizationByMspIDStmt != nil { - if cerr := q.getFabricOrganizationByMspIDStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getFabricOrganizationByMspIDStmt: %w", cerr) - } - } - if q.getFabricOrganizationWithKeysStmt != nil { - if cerr := q.getFabricOrganizationWithKeysStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getFabricOrganizationWithKeysStmt: %w", cerr) - } - } - if q.getKeyStmt != nil { - if cerr := q.getKeyStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeyStmt: %w", cerr) - } - } - if q.getKeyByEthereumAddressStmt != nil { - if cerr := q.getKeyByEthereumAddressStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeyByEthereumAddressStmt: %w", cerr) - } - } - if q.getKeyByIDStmt != nil { - if cerr := q.getKeyByIDStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeyByIDStmt: %w", cerr) - } - } - if q.getKeyCountByProviderStmt != nil { - if cerr := q.getKeyCountByProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeyCountByProviderStmt: %w", cerr) - } - } - if q.getKeyProviderStmt != nil { - if cerr := q.getKeyProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeyProviderStmt: %w", cerr) - } - } - if q.getKeyProviderByDefaultStmt != nil { - if cerr := q.getKeyProviderByDefaultStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeyProviderByDefaultStmt: %w", cerr) - } - } - if q.getKeyProviderByIDStmt != nil { - if cerr := q.getKeyProviderByIDStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeyProviderByIDStmt: %w", cerr) - } - } - if q.getKeysByAlgorithmStmt != nil { - if cerr := q.getKeysByAlgorithmStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeysByAlgorithmStmt: 
%w", cerr) - } - } - if q.getKeysByProviderAndCurveStmt != nil { - if cerr := q.getKeysByProviderAndCurveStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeysByProviderAndCurveStmt: %w", cerr) - } - } - if q.getKeysCountStmt != nil { - if cerr := q.getKeysCountStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getKeysCountStmt: %w", cerr) - } - } - if q.getLatestNodeEventStmt != nil { - if cerr := q.getLatestNodeEventStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getLatestNodeEventStmt: %w", cerr) - } - } - if q.getNetworkStmt != nil { - if cerr := q.getNetworkStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNetworkStmt: %w", cerr) - } - } - if q.getNetworkByNameStmt != nil { - if cerr := q.getNetworkByNameStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNetworkByNameStmt: %w", cerr) - } - } - if q.getNetworkByNetworkIdStmt != nil { - if cerr := q.getNetworkByNetworkIdStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNetworkByNetworkIdStmt: %w", cerr) - } - } - if q.getNetworkCurrentConfigBlockStmt != nil { - if cerr := q.getNetworkCurrentConfigBlockStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNetworkCurrentConfigBlockStmt: %w", cerr) - } - } - if q.getNetworkNodeStmt != nil { - if cerr := q.getNetworkNodeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNetworkNodeStmt: %w", cerr) - } - } - if q.getNetworkNodesStmt != nil { - if cerr := q.getNetworkNodesStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNetworkNodesStmt: %w", cerr) - } - } - if q.getNodeStmt != nil { - if cerr := q.getNodeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNodeStmt: %w", cerr) - } - } - if q.getNodeBySlugStmt != nil { - if cerr := q.getNodeBySlugStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNodeBySlugStmt: %w", cerr) - } - } - if q.getNodeEventStmt != nil { - if cerr := q.getNodeEventStmt.Close(); cerr != nil { - 
err = fmt.Errorf("error closing getNodeEventStmt: %w", cerr) - } - } - if q.getNotificationProviderStmt != nil { - if cerr := q.getNotificationProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getNotificationProviderStmt: %w", cerr) - } - } - if q.getOldestBackupByTargetStmt != nil { - if cerr := q.getOldestBackupByTargetStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getOldestBackupByTargetStmt: %w", cerr) - } - } - if q.getOrdererPortsStmt != nil { - if cerr := q.getOrdererPortsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getOrdererPortsStmt: %w", cerr) - } - } - if q.getPeerPortsStmt != nil { - if cerr := q.getPeerPortsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getPeerPortsStmt: %w", cerr) - } - } - if q.getProvidersByNotificationTypeStmt != nil { - if cerr := q.getProvidersByNotificationTypeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getProvidersByNotificationTypeStmt: %w", cerr) - } - } - if q.getRecentCompletedBackupsStmt != nil { - if cerr := q.getRecentCompletedBackupsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getRecentCompletedBackupsStmt: %w", cerr) - } - } - if q.getSessionStmt != nil { - if cerr := q.getSessionStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getSessionStmt: %w", cerr) - } - } - if q.getUserStmt != nil { - if cerr := q.getUserStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getUserStmt: %w", cerr) - } - } - if q.getUserByUsernameStmt != nil { - if cerr := q.getUserByUsernameStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getUserByUsernameStmt: %w", cerr) - } - } - if q.listBackupSchedulesStmt != nil { - if cerr := q.listBackupSchedulesStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listBackupSchedulesStmt: %w", cerr) - } - } - if q.listBackupTargetsStmt != nil { - if cerr := q.listBackupTargetsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listBackupTargetsStmt: 
%w", cerr) - } - } - if q.listBackupsStmt != nil { - if cerr := q.listBackupsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listBackupsStmt: %w", cerr) - } - } - if q.listBackupsByScheduleStmt != nil { - if cerr := q.listBackupsByScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listBackupsByScheduleStmt: %w", cerr) - } - } - if q.listBackupsByTargetStmt != nil { - if cerr := q.listBackupsByTargetStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listBackupsByTargetStmt: %w", cerr) - } - } - if q.listFabricOrganizationsStmt != nil { - if cerr := q.listFabricOrganizationsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listFabricOrganizationsStmt: %w", cerr) - } - } - if q.listFabricOrganizationsWithKeysStmt != nil { - if cerr := q.listFabricOrganizationsWithKeysStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listFabricOrganizationsWithKeysStmt: %w", cerr) - } - } - if q.listKeyProvidersStmt != nil { - if cerr := q.listKeyProvidersStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listKeyProvidersStmt: %w", cerr) - } - } - if q.listKeysStmt != nil { - if cerr := q.listKeysStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listKeysStmt: %w", cerr) - } - } - if q.listNetworkNodesByNetworkStmt != nil { - if cerr := q.listNetworkNodesByNetworkStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNetworkNodesByNetworkStmt: %w", cerr) - } - } - if q.listNetworkNodesByNodeStmt != nil { - if cerr := q.listNetworkNodesByNodeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNetworkNodesByNodeStmt: %w", cerr) - } - } - if q.listNetworksStmt != nil { - if cerr := q.listNetworksStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNetworksStmt: %w", cerr) - } - } - if q.listNodeEventsStmt != nil { - if cerr := q.listNodeEventsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNodeEventsStmt: %w", cerr) - } - } - if 
q.listNodeEventsByTypeStmt != nil { - if cerr := q.listNodeEventsByTypeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNodeEventsByTypeStmt: %w", cerr) - } - } - if q.listNodesStmt != nil { - if cerr := q.listNodesStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNodesStmt: %w", cerr) - } - } - if q.listNodesByNetworkStmt != nil { - if cerr := q.listNodesByNetworkStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNodesByNetworkStmt: %w", cerr) - } - } - if q.listNodesByPlatformStmt != nil { - if cerr := q.listNodesByPlatformStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNodesByPlatformStmt: %w", cerr) - } - } - if q.listNotificationProvidersStmt != nil { - if cerr := q.listNotificationProvidersStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listNotificationProvidersStmt: %w", cerr) - } - } - if q.listUsersStmt != nil { - if cerr := q.listUsersStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing listUsersStmt: %w", cerr) - } - } - if q.markBackupNotifiedStmt != nil { - if cerr := q.markBackupNotifiedStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing markBackupNotifiedStmt: %w", cerr) - } - } - if q.unsetDefaultNotificationProviderStmt != nil { - if cerr := q.unsetDefaultNotificationProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing unsetDefaultNotificationProviderStmt: %w", cerr) - } - } - if q.unsetDefaultProviderStmt != nil { - if cerr := q.unsetDefaultProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing unsetDefaultProviderStmt: %w", cerr) - } - } - if q.updateBackupCompletedStmt != nil { - if cerr := q.updateBackupCompletedStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateBackupCompletedStmt: %w", cerr) - } - } - if q.updateBackupFailedStmt != nil { - if cerr := q.updateBackupFailedStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateBackupFailedStmt: %w", cerr) - } - } - if 
q.updateBackupScheduleStmt != nil { - if cerr := q.updateBackupScheduleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateBackupScheduleStmt: %w", cerr) - } - } - if q.updateBackupScheduleLastRunStmt != nil { - if cerr := q.updateBackupScheduleLastRunStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateBackupScheduleLastRunStmt: %w", cerr) - } - } - if q.updateBackupSizeStmt != nil { - if cerr := q.updateBackupSizeStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateBackupSizeStmt: %w", cerr) - } - } - if q.updateBackupStatusStmt != nil { - if cerr := q.updateBackupStatusStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateBackupStatusStmt: %w", cerr) - } - } - if q.updateBackupTargetStmt != nil { - if cerr := q.updateBackupTargetStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateBackupTargetStmt: %w", cerr) - } - } - if q.updateFabricOrganizationStmt != nil { - if cerr := q.updateFabricOrganizationStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateFabricOrganizationStmt: %w", cerr) - } - } - if q.updateKeyStmt != nil { - if cerr := q.updateKeyStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateKeyStmt: %w", cerr) - } - } - if q.updateKeyProviderStmt != nil { - if cerr := q.updateKeyProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateKeyProviderStmt: %w", cerr) - } - } - if q.updateNetworkCurrentConfigBlockStmt != nil { - if cerr := q.updateNetworkCurrentConfigBlockStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNetworkCurrentConfigBlockStmt: %w", cerr) - } - } - if q.updateNetworkGenesisBlockStmt != nil { - if cerr := q.updateNetworkGenesisBlockStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNetworkGenesisBlockStmt: %w", cerr) - } - } - if q.updateNetworkNodeRoleStmt != nil { - if cerr := q.updateNetworkNodeRoleStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing 
updateNetworkNodeRoleStmt: %w", cerr) - } - } - if q.updateNetworkNodeStatusStmt != nil { - if cerr := q.updateNetworkNodeStatusStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNetworkNodeStatusStmt: %w", cerr) - } - } - if q.updateNetworkStatusStmt != nil { - if cerr := q.updateNetworkStatusStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNetworkStatusStmt: %w", cerr) - } - } - if q.updateNodeDeploymentConfigStmt != nil { - if cerr := q.updateNodeDeploymentConfigStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNodeDeploymentConfigStmt: %w", cerr) - } - } - if q.updateNodeEndpointStmt != nil { - if cerr := q.updateNodeEndpointStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNodeEndpointStmt: %w", cerr) - } - } - if q.updateNodePublicEndpointStmt != nil { - if cerr := q.updateNodePublicEndpointStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNodePublicEndpointStmt: %w", cerr) - } - } - if q.updateNodeStatusStmt != nil { - if cerr := q.updateNodeStatusStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNodeStatusStmt: %w", cerr) - } - } - if q.updateNotificationProviderStmt != nil { - if cerr := q.updateNotificationProviderStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateNotificationProviderStmt: %w", cerr) - } - } - if q.updateProviderTestResultsStmt != nil { - if cerr := q.updateProviderTestResultsStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateProviderTestResultsStmt: %w", cerr) - } - } - if q.updateUserStmt != nil { - if cerr := q.updateUserStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateUserStmt: %w", cerr) - } - } - if q.updateUserLastLoginStmt != nil { - if cerr := q.updateUserLastLoginStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing updateUserLastLoginStmt: %w", cerr) - } - } - return err -} - -func (q *Queries) exec(ctx context.Context, stmt *sql.Stmt, query string, args 
...interface{}) (sql.Result, error) { - switch { - case stmt != nil && q.tx != nil: - return q.tx.StmtContext(ctx, stmt).ExecContext(ctx, args...) - case stmt != nil: - return stmt.ExecContext(ctx, args...) - default: - return q.db.ExecContext(ctx, query, args...) - } -} - -func (q *Queries) query(ctx context.Context, stmt *sql.Stmt, query string, args ...interface{}) (*sql.Rows, error) { - switch { - case stmt != nil && q.tx != nil: - return q.tx.StmtContext(ctx, stmt).QueryContext(ctx, args...) - case stmt != nil: - return stmt.QueryContext(ctx, args...) - default: - return q.db.QueryContext(ctx, query, args...) - } -} - -func (q *Queries) queryRow(ctx context.Context, stmt *sql.Stmt, query string, args ...interface{}) *sql.Row { - switch { - case stmt != nil && q.tx != nil: - return q.tx.StmtContext(ctx, stmt).QueryRowContext(ctx, args...) - case stmt != nil: - return stmt.QueryRowContext(ctx, args...) - default: - return q.db.QueryRowContext(ctx, query, args...) - } -} - type Queries struct { - db DBTX - tx *sql.Tx - checkNetworkNodeExistsStmt *sql.Stmt - countBackupsByScheduleStmt *sql.Stmt - countBackupsByTargetStmt *sql.Stmt - countNetworksStmt *sql.Stmt - countNodeEventsStmt *sql.Stmt - countNodesStmt *sql.Stmt - countNodesByPlatformStmt *sql.Stmt - countUsersStmt *sql.Stmt - createBackupStmt *sql.Stmt - createBackupScheduleStmt *sql.Stmt - createBackupTargetStmt *sql.Stmt - createFabricOrganizationStmt *sql.Stmt - createKeyStmt *sql.Stmt - createKeyProviderStmt *sql.Stmt - createNetworkStmt *sql.Stmt - createNetworkFullStmt *sql.Stmt - createNetworkNodeStmt *sql.Stmt - createNodeStmt *sql.Stmt - createNodeEventStmt *sql.Stmt - createNotificationProviderStmt *sql.Stmt - createSessionStmt *sql.Stmt - createUserStmt *sql.Stmt - deleteBackupStmt *sql.Stmt - deleteBackupScheduleStmt *sql.Stmt - deleteBackupTargetStmt *sql.Stmt - deleteBackupsByScheduleStmt *sql.Stmt - deleteBackupsByTargetStmt *sql.Stmt - deleteExpiredSessionsStmt *sql.Stmt - 
deleteFabricOrganizationStmt *sql.Stmt - deleteKeyStmt *sql.Stmt - deleteKeyProviderStmt *sql.Stmt - deleteNetworkStmt *sql.Stmt - deleteNetworkNodeStmt *sql.Stmt - deleteNodeStmt *sql.Stmt - deleteNotificationProviderStmt *sql.Stmt - deleteOldBackupsStmt *sql.Stmt - deleteSessionStmt *sql.Stmt - deleteUserStmt *sql.Stmt - deleteUserSessionsStmt *sql.Stmt - disableBackupScheduleStmt *sql.Stmt - enableBackupScheduleStmt *sql.Stmt - getAllKeysStmt *sql.Stmt - getAllNodesStmt *sql.Stmt - getBackupStmt *sql.Stmt - getBackupScheduleStmt *sql.Stmt - getBackupTargetStmt *sql.Stmt - getBackupsByDateRangeStmt *sql.Stmt - getBackupsByScheduleAndStatusStmt *sql.Stmt - getBackupsByStatusStmt *sql.Stmt - getDefaultNotificationProviderStmt *sql.Stmt - getDefaultNotificationProviderForTypeStmt *sql.Stmt - getFabricOrganizationStmt *sql.Stmt - getFabricOrganizationByIDStmt *sql.Stmt - getFabricOrganizationByMSPIDStmt *sql.Stmt - getFabricOrganizationByMspIDStmt *sql.Stmt - getFabricOrganizationWithKeysStmt *sql.Stmt - getKeyStmt *sql.Stmt - getKeyByEthereumAddressStmt *sql.Stmt - getKeyByIDStmt *sql.Stmt - getKeyCountByProviderStmt *sql.Stmt - getKeyProviderStmt *sql.Stmt - getKeyProviderByDefaultStmt *sql.Stmt - getKeyProviderByIDStmt *sql.Stmt - getKeysByAlgorithmStmt *sql.Stmt - getKeysByProviderAndCurveStmt *sql.Stmt - getKeysCountStmt *sql.Stmt - getLatestNodeEventStmt *sql.Stmt - getNetworkStmt *sql.Stmt - getNetworkByNameStmt *sql.Stmt - getNetworkByNetworkIdStmt *sql.Stmt - getNetworkCurrentConfigBlockStmt *sql.Stmt - getNetworkNodeStmt *sql.Stmt - getNetworkNodesStmt *sql.Stmt - getNodeStmt *sql.Stmt - getNodeBySlugStmt *sql.Stmt - getNodeEventStmt *sql.Stmt - getNotificationProviderStmt *sql.Stmt - getOldestBackupByTargetStmt *sql.Stmt - getOrdererPortsStmt *sql.Stmt - getPeerPortsStmt *sql.Stmt - getProvidersByNotificationTypeStmt *sql.Stmt - getRecentCompletedBackupsStmt *sql.Stmt - getSessionStmt *sql.Stmt - getUserStmt *sql.Stmt - getUserByUsernameStmt *sql.Stmt - 
listBackupSchedulesStmt *sql.Stmt - listBackupTargetsStmt *sql.Stmt - listBackupsStmt *sql.Stmt - listBackupsByScheduleStmt *sql.Stmt - listBackupsByTargetStmt *sql.Stmt - listFabricOrganizationsStmt *sql.Stmt - listFabricOrganizationsWithKeysStmt *sql.Stmt - listKeyProvidersStmt *sql.Stmt - listKeysStmt *sql.Stmt - listNetworkNodesByNetworkStmt *sql.Stmt - listNetworkNodesByNodeStmt *sql.Stmt - listNetworksStmt *sql.Stmt - listNodeEventsStmt *sql.Stmt - listNodeEventsByTypeStmt *sql.Stmt - listNodesStmt *sql.Stmt - listNodesByNetworkStmt *sql.Stmt - listNodesByPlatformStmt *sql.Stmt - listNotificationProvidersStmt *sql.Stmt - listUsersStmt *sql.Stmt - markBackupNotifiedStmt *sql.Stmt - unsetDefaultNotificationProviderStmt *sql.Stmt - unsetDefaultProviderStmt *sql.Stmt - updateBackupCompletedStmt *sql.Stmt - updateBackupFailedStmt *sql.Stmt - updateBackupScheduleStmt *sql.Stmt - updateBackupScheduleLastRunStmt *sql.Stmt - updateBackupSizeStmt *sql.Stmt - updateBackupStatusStmt *sql.Stmt - updateBackupTargetStmt *sql.Stmt - updateFabricOrganizationStmt *sql.Stmt - updateKeyStmt *sql.Stmt - updateKeyProviderStmt *sql.Stmt - updateNetworkCurrentConfigBlockStmt *sql.Stmt - updateNetworkGenesisBlockStmt *sql.Stmt - updateNetworkNodeRoleStmt *sql.Stmt - updateNetworkNodeStatusStmt *sql.Stmt - updateNetworkStatusStmt *sql.Stmt - updateNodeDeploymentConfigStmt *sql.Stmt - updateNodeEndpointStmt *sql.Stmt - updateNodePublicEndpointStmt *sql.Stmt - updateNodeStatusStmt *sql.Stmt - updateNotificationProviderStmt *sql.Stmt - updateProviderTestResultsStmt *sql.Stmt - updateUserStmt *sql.Stmt - updateUserLastLoginStmt *sql.Stmt + db DBTX } func (q *Queries) WithTx(tx *sql.Tx) *Queries { return &Queries{ - db: tx, - tx: tx, - checkNetworkNodeExistsStmt: q.checkNetworkNodeExistsStmt, - countBackupsByScheduleStmt: q.countBackupsByScheduleStmt, - countBackupsByTargetStmt: q.countBackupsByTargetStmt, - countNetworksStmt: q.countNetworksStmt, - countNodeEventsStmt: 
q.countNodeEventsStmt, - countNodesStmt: q.countNodesStmt, - countNodesByPlatformStmt: q.countNodesByPlatformStmt, - countUsersStmt: q.countUsersStmt, - createBackupStmt: q.createBackupStmt, - createBackupScheduleStmt: q.createBackupScheduleStmt, - createBackupTargetStmt: q.createBackupTargetStmt, - createFabricOrganizationStmt: q.createFabricOrganizationStmt, - createKeyStmt: q.createKeyStmt, - createKeyProviderStmt: q.createKeyProviderStmt, - createNetworkStmt: q.createNetworkStmt, - createNetworkFullStmt: q.createNetworkFullStmt, - createNetworkNodeStmt: q.createNetworkNodeStmt, - createNodeStmt: q.createNodeStmt, - createNodeEventStmt: q.createNodeEventStmt, - createNotificationProviderStmt: q.createNotificationProviderStmt, - createSessionStmt: q.createSessionStmt, - createUserStmt: q.createUserStmt, - deleteBackupStmt: q.deleteBackupStmt, - deleteBackupScheduleStmt: q.deleteBackupScheduleStmt, - deleteBackupTargetStmt: q.deleteBackupTargetStmt, - deleteBackupsByScheduleStmt: q.deleteBackupsByScheduleStmt, - deleteBackupsByTargetStmt: q.deleteBackupsByTargetStmt, - deleteExpiredSessionsStmt: q.deleteExpiredSessionsStmt, - deleteFabricOrganizationStmt: q.deleteFabricOrganizationStmt, - deleteKeyStmt: q.deleteKeyStmt, - deleteKeyProviderStmt: q.deleteKeyProviderStmt, - deleteNetworkStmt: q.deleteNetworkStmt, - deleteNetworkNodeStmt: q.deleteNetworkNodeStmt, - deleteNodeStmt: q.deleteNodeStmt, - deleteNotificationProviderStmt: q.deleteNotificationProviderStmt, - deleteOldBackupsStmt: q.deleteOldBackupsStmt, - deleteSessionStmt: q.deleteSessionStmt, - deleteUserStmt: q.deleteUserStmt, - deleteUserSessionsStmt: q.deleteUserSessionsStmt, - disableBackupScheduleStmt: q.disableBackupScheduleStmt, - enableBackupScheduleStmt: q.enableBackupScheduleStmt, - getAllKeysStmt: q.getAllKeysStmt, - getAllNodesStmt: q.getAllNodesStmt, - getBackupStmt: q.getBackupStmt, - getBackupScheduleStmt: q.getBackupScheduleStmt, - getBackupTargetStmt: q.getBackupTargetStmt, - 
getBackupsByDateRangeStmt: q.getBackupsByDateRangeStmt, - getBackupsByScheduleAndStatusStmt: q.getBackupsByScheduleAndStatusStmt, - getBackupsByStatusStmt: q.getBackupsByStatusStmt, - getDefaultNotificationProviderStmt: q.getDefaultNotificationProviderStmt, - getDefaultNotificationProviderForTypeStmt: q.getDefaultNotificationProviderForTypeStmt, - getFabricOrganizationStmt: q.getFabricOrganizationStmt, - getFabricOrganizationByIDStmt: q.getFabricOrganizationByIDStmt, - getFabricOrganizationByMSPIDStmt: q.getFabricOrganizationByMSPIDStmt, - getFabricOrganizationByMspIDStmt: q.getFabricOrganizationByMspIDStmt, - getFabricOrganizationWithKeysStmt: q.getFabricOrganizationWithKeysStmt, - getKeyStmt: q.getKeyStmt, - getKeyByEthereumAddressStmt: q.getKeyByEthereumAddressStmt, - getKeyByIDStmt: q.getKeyByIDStmt, - getKeyCountByProviderStmt: q.getKeyCountByProviderStmt, - getKeyProviderStmt: q.getKeyProviderStmt, - getKeyProviderByDefaultStmt: q.getKeyProviderByDefaultStmt, - getKeyProviderByIDStmt: q.getKeyProviderByIDStmt, - getKeysByAlgorithmStmt: q.getKeysByAlgorithmStmt, - getKeysByProviderAndCurveStmt: q.getKeysByProviderAndCurveStmt, - getKeysCountStmt: q.getKeysCountStmt, - getLatestNodeEventStmt: q.getLatestNodeEventStmt, - getNetworkStmt: q.getNetworkStmt, - getNetworkByNameStmt: q.getNetworkByNameStmt, - getNetworkByNetworkIdStmt: q.getNetworkByNetworkIdStmt, - getNetworkCurrentConfigBlockStmt: q.getNetworkCurrentConfigBlockStmt, - getNetworkNodeStmt: q.getNetworkNodeStmt, - getNetworkNodesStmt: q.getNetworkNodesStmt, - getNodeStmt: q.getNodeStmt, - getNodeBySlugStmt: q.getNodeBySlugStmt, - getNodeEventStmt: q.getNodeEventStmt, - getNotificationProviderStmt: q.getNotificationProviderStmt, - getOldestBackupByTargetStmt: q.getOldestBackupByTargetStmt, - getOrdererPortsStmt: q.getOrdererPortsStmt, - getPeerPortsStmt: q.getPeerPortsStmt, - getProvidersByNotificationTypeStmt: q.getProvidersByNotificationTypeStmt, - getRecentCompletedBackupsStmt: 
q.getRecentCompletedBackupsStmt, - getSessionStmt: q.getSessionStmt, - getUserStmt: q.getUserStmt, - getUserByUsernameStmt: q.getUserByUsernameStmt, - listBackupSchedulesStmt: q.listBackupSchedulesStmt, - listBackupTargetsStmt: q.listBackupTargetsStmt, - listBackupsStmt: q.listBackupsStmt, - listBackupsByScheduleStmt: q.listBackupsByScheduleStmt, - listBackupsByTargetStmt: q.listBackupsByTargetStmt, - listFabricOrganizationsStmt: q.listFabricOrganizationsStmt, - listFabricOrganizationsWithKeysStmt: q.listFabricOrganizationsWithKeysStmt, - listKeyProvidersStmt: q.listKeyProvidersStmt, - listKeysStmt: q.listKeysStmt, - listNetworkNodesByNetworkStmt: q.listNetworkNodesByNetworkStmt, - listNetworkNodesByNodeStmt: q.listNetworkNodesByNodeStmt, - listNetworksStmt: q.listNetworksStmt, - listNodeEventsStmt: q.listNodeEventsStmt, - listNodeEventsByTypeStmt: q.listNodeEventsByTypeStmt, - listNodesStmt: q.listNodesStmt, - listNodesByNetworkStmt: q.listNodesByNetworkStmt, - listNodesByPlatformStmt: q.listNodesByPlatformStmt, - listNotificationProvidersStmt: q.listNotificationProvidersStmt, - listUsersStmt: q.listUsersStmt, - markBackupNotifiedStmt: q.markBackupNotifiedStmt, - unsetDefaultNotificationProviderStmt: q.unsetDefaultNotificationProviderStmt, - unsetDefaultProviderStmt: q.unsetDefaultProviderStmt, - updateBackupCompletedStmt: q.updateBackupCompletedStmt, - updateBackupFailedStmt: q.updateBackupFailedStmt, - updateBackupScheduleStmt: q.updateBackupScheduleStmt, - updateBackupScheduleLastRunStmt: q.updateBackupScheduleLastRunStmt, - updateBackupSizeStmt: q.updateBackupSizeStmt, - updateBackupStatusStmt: q.updateBackupStatusStmt, - updateBackupTargetStmt: q.updateBackupTargetStmt, - updateFabricOrganizationStmt: q.updateFabricOrganizationStmt, - updateKeyStmt: q.updateKeyStmt, - updateKeyProviderStmt: q.updateKeyProviderStmt, - updateNetworkCurrentConfigBlockStmt: q.updateNetworkCurrentConfigBlockStmt, - updateNetworkGenesisBlockStmt: q.updateNetworkGenesisBlockStmt, - 
updateNetworkNodeRoleStmt: q.updateNetworkNodeRoleStmt, - updateNetworkNodeStatusStmt: q.updateNetworkNodeStatusStmt, - updateNetworkStatusStmt: q.updateNetworkStatusStmt, - updateNodeDeploymentConfigStmt: q.updateNodeDeploymentConfigStmt, - updateNodeEndpointStmt: q.updateNodeEndpointStmt, - updateNodePublicEndpointStmt: q.updateNodePublicEndpointStmt, - updateNodeStatusStmt: q.updateNodeStatusStmt, - updateNotificationProviderStmt: q.updateNotificationProviderStmt, - updateProviderTestResultsStmt: q.updateProviderTestResultsStmt, - updateUserStmt: q.updateUserStmt, - updateUserLastLoginStmt: q.updateUserLastLoginStmt, + db: tx, } } diff --git a/pkg/db/migrations/0002_add_crl_tables.down.sql b/pkg/db/migrations/0002_add_crl_tables.down.sql new file mode 100644 index 0000000..33ec919 --- /dev/null +++ b/pkg/db/migrations/0002_add_crl_tables.down.sql @@ -0,0 +1,26 @@ +-- Drop indexes +DROP INDEX IF EXISTS idx_revoked_certs_org_id; +DROP INDEX IF EXISTS idx_revoked_certs_serial; +DROP INDEX IF EXISTS idx_revoked_certs_revocation_time; + +-- Drop the revoked certificates table +DROP TABLE IF EXISTS fabric_revoked_certificates; + +-- Remove CRL-related columns from fabric_organizations +-- SQLite doesn't support DROP COLUMN directly, need to recreate table +PRAGMA foreign_keys=off; + +CREATE TABLE fabric_organizations_new AS +SELECT id, name, msp_id, sign_key_id, tls_root_key_id, admin_sign_key_id, provider_id, created_at, updated_at +FROM fabric_organizations; + +DROP TABLE fabric_organizations; +ALTER TABLE fabric_organizations_new RENAME TO fabric_organizations; + +-- Recreate foreign key constraints +CREATE INDEX IF NOT EXISTS idx_fabric_organizations_sign_key_id ON fabric_organizations(sign_key_id); +CREATE INDEX IF NOT EXISTS idx_fabric_organizations_tls_root_key_id ON fabric_organizations(tls_root_key_id); +CREATE INDEX IF NOT EXISTS idx_fabric_organizations_admin_sign_key_id ON fabric_organizations(admin_sign_key_id); +CREATE INDEX IF NOT EXISTS 
idx_fabric_organizations_provider_id ON fabric_organizations(provider_id); + +PRAGMA foreign_keys=on; \ No newline at end of file diff --git a/pkg/db/migrations/0002_add_crl_tables.up.sql b/pkg/db/migrations/0002_add_crl_tables.up.sql new file mode 100644 index 0000000..9abdfcc --- /dev/null +++ b/pkg/db/migrations/0002_add_crl_tables.up.sql @@ -0,0 +1,23 @@ +-- Add CRL-related fields to fabric_organizations +ALTER TABLE fabric_organizations +ADD COLUMN crl_key_id INTEGER REFERENCES keys(id); +ALTER TABLE fabric_organizations +ADD COLUMN crl_last_update TIMESTAMP; + +-- Create table for revoked certificates +CREATE TABLE fabric_revoked_certificates ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + fabric_organization_id INTEGER NOT NULL REFERENCES fabric_organizations(id) ON DELETE CASCADE, + serial_number TEXT NOT NULL, -- Store as hex string for compatibility + revocation_time DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + reason INTEGER NOT NULL, -- RFC 5280 revocation reason code + issuer_certificate_id INTEGER REFERENCES keys(id), -- Reference to the certificate that issued this one + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + UNIQUE(fabric_organization_id, serial_number) +); + +-- Create indexes for performance +CREATE INDEX idx_revoked_certs_org_id ON fabric_revoked_certificates(fabric_organization_id); +CREATE INDEX idx_revoked_certs_serial ON fabric_revoked_certificates(serial_number); +CREATE INDEX idx_revoked_certs_revocation_time ON fabric_revoked_certificates(revocation_time); \ No newline at end of file diff --git a/pkg/db/migrations/0003_add_settings.down.sql b/pkg/db/migrations/0003_add_settings.down.sql new file mode 100644 index 0000000..4596c6a --- /dev/null +++ b/pkg/db/migrations/0003_add_settings.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS settings; diff --git a/pkg/db/migrations/0003_add_settings.up.sql b/pkg/db/migrations/0003_add_settings.up.sql new file mode 100644 index 
0000000..2993609 --- /dev/null +++ b/pkg/db/migrations/0003_add_settings.up.sql @@ -0,0 +1,7 @@ +CREATE TABLE settings ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + config TEXT NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + diff --git a/pkg/db/migrations/0004_add_node_error_message.down.sql b/pkg/db/migrations/0004_add_node_error_message.down.sql new file mode 100644 index 0000000..2c95d79 --- /dev/null +++ b/pkg/db/migrations/0004_add_node_error_message.down.sql @@ -0,0 +1 @@ +ALTER TABLE nodes DROP COLUMN error_message; \ No newline at end of file diff --git a/pkg/db/migrations/0004_add_node_error_message.up.sql b/pkg/db/migrations/0004_add_node_error_message.up.sql new file mode 100644 index 0000000..0bb70c5 --- /dev/null +++ b/pkg/db/migrations/0004_add_node_error_message.up.sql @@ -0,0 +1 @@ +ALTER TABLE nodes ADD COLUMN error_message TEXT DEFAULT NULL; \ No newline at end of file diff --git a/pkg/db/models.go b/pkg/db/models.go index ed0cbef..07eb38f 100644 --- a/pkg/db/models.go +++ b/pkg/db/models.go @@ -11,45 +11,45 @@ import ( type Backup struct { ID int64 `json:"id"` - ScheduleID sql.NullInt64 `json:"schedule_id"` - TargetID int64 `json:"target_id"` + ScheduleID sql.NullInt64 `json:"scheduleId"` + TargetID int64 `json:"targetId"` Status string `json:"status"` - SizeBytes sql.NullInt64 `json:"size_bytes"` - StartedAt time.Time `json:"started_at"` - CompletedAt sql.NullTime `json:"completed_at"` - ErrorMessage sql.NullString `json:"error_message"` - CreatedAt time.Time `json:"created_at"` - NotificationSent int64 `json:"notification_sent"` + SizeBytes sql.NullInt64 `json:"sizeBytes"` + StartedAt time.Time `json:"startedAt"` + CompletedAt sql.NullTime `json:"completedAt"` + ErrorMessage sql.NullString `json:"errorMessage"` + CreatedAt time.Time `json:"createdAt"` + NotificationSent int64 `json:"notificationSent"` } type BackupSchedule struct { ID int64 `json:"id"` Name string `json:"name"` 
Description sql.NullString `json:"description"` - CronExpression string `json:"cron_expression"` - TargetID int64 `json:"target_id"` - RetentionDays int64 `json:"retention_days"` + CronExpression string `json:"cronExpression"` + TargetID int64 `json:"targetId"` + RetentionDays int64 `json:"retentionDays"` Enabled bool `json:"enabled"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt sql.NullTime `json:"updated_at"` - LastRunAt sql.NullTime `json:"last_run_at"` - NextRunAt sql.NullTime `json:"next_run_at"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt sql.NullTime `json:"updatedAt"` + LastRunAt sql.NullTime `json:"lastRunAt"` + NextRunAt sql.NullTime `json:"nextRunAt"` } type BackupTarget struct { ID int64 `json:"id"` Name string `json:"name"` - BucketName sql.NullString `json:"bucket_name"` + BucketName sql.NullString `json:"bucketName"` Region sql.NullString `json:"region"` Endpoint sql.NullString `json:"endpoint"` - BucketPath sql.NullString `json:"bucket_path"` - AccessKeyID sql.NullString `json:"access_key_id"` - SecretKey sql.NullString `json:"secret_key"` - S3PathStyle sql.NullBool `json:"s3_path_style"` - ResticPassword sql.NullString `json:"restic_password"` + BucketPath sql.NullString `json:"bucketPath"` + AccessKeyID sql.NullString `json:"accessKeyId"` + SecretKey sql.NullString `json:"secretKey"` + S3PathStyle sql.NullBool `json:"s3PathStyle"` + ResticPassword sql.NullString `json:"resticPassword"` Type string `json:"type"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt sql.NullTime `json:"updated_at"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt sql.NullTime `json:"updatedAt"` } type BlockchainPlatform struct { @@ -58,19 +58,32 @@ type BlockchainPlatform struct { type FabricOrganization struct { ID int64 `json:"id"` - MspID string `json:"msp_id"` + MspID string `json:"mspId"` Description sql.NullString `json:"description"` Config sql.NullString `json:"config"` - CaConfig sql.NullString `json:"ca_config"` - SignKeyID 
sql.NullInt64 `json:"sign_key_id"` - TlsRootKeyID sql.NullInt64 `json:"tls_root_key_id"` - AdminTlsKeyID sql.NullInt64 `json:"admin_tls_key_id"` - AdminSignKeyID sql.NullInt64 `json:"admin_sign_key_id"` - ClientSignKeyID sql.NullInt64 `json:"client_sign_key_id"` - ProviderID sql.NullInt64 `json:"provider_id"` - CreatedAt time.Time `json:"created_at"` - CreatedBy sql.NullInt64 `json:"created_by"` - UpdatedAt sql.NullTime `json:"updated_at"` + CaConfig sql.NullString `json:"caConfig"` + SignKeyID sql.NullInt64 `json:"signKeyId"` + TlsRootKeyID sql.NullInt64 `json:"tlsRootKeyId"` + AdminTlsKeyID sql.NullInt64 `json:"adminTlsKeyId"` + AdminSignKeyID sql.NullInt64 `json:"adminSignKeyId"` + ClientSignKeyID sql.NullInt64 `json:"clientSignKeyId"` + ProviderID sql.NullInt64 `json:"providerId"` + CreatedAt time.Time `json:"createdAt"` + CreatedBy sql.NullInt64 `json:"createdBy"` + UpdatedAt sql.NullTime `json:"updatedAt"` + CrlKeyID sql.NullInt64 `json:"crlKeyId"` + CrlLastUpdate sql.NullTime `json:"crlLastUpdate"` +} + +type FabricRevokedCertificate struct { + ID int64 `json:"id"` + FabricOrganizationID int64 `json:"fabricOrganizationId"` + SerialNumber string `json:"serialNumber"` + RevocationTime time.Time `json:"revocationTime"` + Reason int64 `json:"reason"` + IssuerCertificateID sql.NullInt64 `json:"issuerCertificateId"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` } type Key struct { @@ -78,34 +91,34 @@ type Key struct { Name string `json:"name"` Description sql.NullString `json:"description"` Algorithm string `json:"algorithm"` - KeySize sql.NullInt64 `json:"key_size"` + KeySize sql.NullInt64 `json:"keySize"` Curve sql.NullString `json:"curve"` Format string `json:"format"` - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` Certificate sql.NullString `json:"certificate"` Status string `json:"status"` - CreatedAt time.Time 
`json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExpiresAt sql.NullTime `json:"expires_at"` - LastRotatedAt sql.NullTime `json:"last_rotated_at"` - SigningKeyID sql.NullInt64 `json:"signing_key_id"` - Sha256Fingerprint string `json:"sha256_fingerprint"` - Sha1Fingerprint string `json:"sha1_fingerprint"` - ProviderID int64 `json:"provider_id"` - UserID int64 `json:"user_id"` - IsCa int64 `json:"is_ca"` - EthereumAddress sql.NullString `json:"ethereum_address"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt sql.NullTime `json:"expiresAt"` + LastRotatedAt sql.NullTime `json:"lastRotatedAt"` + SigningKeyID sql.NullInt64 `json:"signingKeyId"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + IsCa int64 `json:"isCa"` + EthereumAddress sql.NullString `json:"ethereumAddress"` } type KeyProvider struct { ID int64 `json:"id"` Name string `json:"name"` Type string `json:"type"` - IsDefault int64 `json:"is_default"` + IsDefault int64 `json:"isDefault"` Config string `json:"config"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` } type KeyProviderType struct { @@ -115,30 +128,30 @@ type KeyProviderType struct { type Network struct { ID int64 `json:"id"` Name string `json:"name"` - NetworkID sql.NullString `json:"network_id"` + NetworkID sql.NullString `json:"networkId"` Platform string `json:"platform"` Status string `json:"status"` Description sql.NullString `json:"description"` Config sql.NullString `json:"config"` - DeploymentConfig sql.NullString `json:"deployment_config"` - ExposedPorts sql.NullString `json:"exposed_ports"` + DeploymentConfig sql.NullString `json:"deploymentConfig"` + ExposedPorts sql.NullString `json:"exposedPorts"` Domain sql.NullString `json:"domain"` 
- CreatedAt time.Time `json:"created_at"` - CreatedBy sql.NullInt64 `json:"created_by"` - UpdatedAt sql.NullTime `json:"updated_at"` - GenesisBlockB64 sql.NullString `json:"genesis_block_b64"` - CurrentConfigBlockB64 sql.NullString `json:"current_config_block_b64"` + CreatedAt time.Time `json:"createdAt"` + CreatedBy sql.NullInt64 `json:"createdBy"` + UpdatedAt sql.NullTime `json:"updatedAt"` + GenesisBlockB64 sql.NullString `json:"genesisBlockB64"` + CurrentConfigBlockB64 sql.NullString `json:"currentConfigBlockB64"` } type NetworkNode struct { ID int64 `json:"id"` - NetworkID int64 `json:"network_id"` - NodeID int64 `json:"node_id"` + NetworkID int64 `json:"networkId"` + NodeID int64 `json:"nodeId"` Role string `json:"role"` Status string `json:"status"` Config sql.NullString `json:"config"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` } type Node struct { @@ -148,37 +161,38 @@ type Node struct { Platform string `json:"platform"` Status string `json:"status"` Description sql.NullString `json:"description"` - NetworkID sql.NullInt64 `json:"network_id"` + NetworkID sql.NullInt64 `json:"networkId"` Config sql.NullString `json:"config"` Resources sql.NullString `json:"resources"` Endpoint sql.NullString `json:"endpoint"` - PublicEndpoint sql.NullString `json:"public_endpoint"` - P2pAddress sql.NullString `json:"p2p_address"` - CreatedAt time.Time `json:"created_at"` - CreatedBy sql.NullInt64 `json:"created_by"` - UpdatedAt sql.NullTime `json:"updated_at"` - FabricOrganizationID sql.NullInt64 `json:"fabric_organization_id"` - NodeType sql.NullString `json:"node_type"` - NodeConfig sql.NullString `json:"node_config"` - DeploymentConfig sql.NullString `json:"deployment_config"` + PublicEndpoint sql.NullString `json:"publicEndpoint"` + P2pAddress sql.NullString `json:"p2pAddress"` + CreatedAt time.Time `json:"createdAt"` + CreatedBy sql.NullInt64 
`json:"createdBy"` + UpdatedAt sql.NullTime `json:"updatedAt"` + FabricOrganizationID sql.NullInt64 `json:"fabricOrganizationId"` + NodeType sql.NullString `json:"nodeType"` + NodeConfig sql.NullString `json:"nodeConfig"` + DeploymentConfig sql.NullString `json:"deploymentConfig"` + ErrorMessage sql.NullString `json:"errorMessage"` } type NodeEvent struct { ID int64 `json:"id"` - NodeID int64 `json:"node_id"` - EventType string `json:"event_type"` + NodeID int64 `json:"nodeId"` + EventType string `json:"eventType"` Description string `json:"description"` Data sql.NullString `json:"data"` Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"createdAt"` } type NodeKey struct { ID int64 `json:"id"` - NodeID int64 `json:"node_id"` - KeyID int64 `json:"key_id"` - KeyType string `json:"key_type"` - CreatedAt time.Time `json:"created_at"` + NodeID int64 `json:"nodeId"` + KeyID int64 `json:"keyId"` + KeyType string `json:"keyType"` + CreatedAt time.Time `json:"createdAt"` } type NodeKeyType struct { @@ -198,30 +212,37 @@ type NotificationProvider struct { Name string `json:"name"` Type string `json:"type"` Config string `json:"config"` - IsDefault bool `json:"is_default"` - IsEnabled bool `json:"is_enabled"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - NotifyNodeDowntime bool `json:"notify_node_downtime"` - NotifyBackupSuccess bool `json:"notify_backup_success"` - NotifyBackupFailure bool `json:"notify_backup_failure"` - NotifyS3ConnectionIssue bool `json:"notify_s3_connection_issue"` - LastTestAt sql.NullTime `json:"last_test_at"` - LastTestStatus sql.NullString `json:"last_test_status"` - LastTestMessage sql.NullString `json:"last_test_message"` + IsDefault bool `json:"isDefault"` + IsEnabled bool `json:"isEnabled"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + NotifyNodeDowntime bool `json:"notifyNodeDowntime"` + NotifyBackupSuccess bool 
`json:"notifyBackupSuccess"` + NotifyBackupFailure bool `json:"notifyBackupFailure"` + NotifyS3ConnectionIssue bool `json:"notifyS3ConnectionIssue"` + LastTestAt sql.NullTime `json:"lastTestAt"` + LastTestStatus sql.NullString `json:"lastTestStatus"` + LastTestMessage sql.NullString `json:"lastTestMessage"` } type Session struct { ID int64 `json:"id"` - SessionID string `json:"session_id"` - UserID int64 `json:"user_id"` + SessionID string `json:"sessionId"` + UserID int64 `json:"userId"` Token string `json:"token"` - IpAddress sql.NullString `json:"ip_address"` - UserAgent sql.NullString `json:"user_agent"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExpiresAt time.Time `json:"expires_at"` - LastActivityAt time.Time `json:"last_activity_at"` + IpAddress sql.NullString `json:"ipAddress"` + UserAgent sql.NullString `json:"userAgent"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt time.Time `json:"expiresAt"` + LastActivityAt time.Time `json:"lastActivityAt"` +} + +type Setting struct { + ID int64 `json:"id"` + Config string `json:"config"` + CreatedAt sql.NullTime `json:"createdAt"` + UpdatedAt sql.NullTime `json:"updatedAt"` } type User struct { @@ -232,9 +253,9 @@ type User struct { Email sql.NullString `json:"email"` Role sql.NullString `json:"role"` Provider sql.NullString `json:"provider"` - ProviderID sql.NullString `json:"provider_id"` - AvatarUrl sql.NullString `json:"avatar_url"` - CreatedAt time.Time `json:"created_at"` - LastLoginAt sql.NullTime `json:"last_login_at"` - UpdatedAt sql.NullTime `json:"updated_at"` + ProviderID sql.NullString `json:"providerId"` + AvatarUrl sql.NullString `json:"avatarUrl"` + CreatedAt time.Time `json:"createdAt"` + LastLoginAt sql.NullTime `json:"lastLoginAt"` + UpdatedAt sql.NullTime `json:"updatedAt"` } diff --git a/pkg/db/querier.go b/pkg/db/querier.go index a6b28b4..6de84ca 100644 --- a/pkg/db/querier.go +++ b/pkg/db/querier.go @@ 
-10,7 +10,8 @@ import ( ) type Querier interface { - CheckNetworkNodeExists(ctx context.Context, arg CheckNetworkNodeExistsParams) (int64, error) + AddRevokedCertificate(ctx context.Context, arg *AddRevokedCertificateParams) error + CheckNetworkNodeExists(ctx context.Context, arg *CheckNetworkNodeExistsParams) (int64, error) CountBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) (int64, error) CountBackupsByTarget(ctx context.Context, targetID int64) (int64, error) CountNetworks(ctx context.Context) (int64, error) @@ -18,21 +19,22 @@ type Querier interface { CountNodes(ctx context.Context) (int64, error) CountNodesByPlatform(ctx context.Context, platform string) (int64, error) CountUsers(ctx context.Context) (int64, error) - CreateBackup(ctx context.Context, arg CreateBackupParams) (Backup, error) - CreateBackupSchedule(ctx context.Context, arg CreateBackupScheduleParams) (BackupSchedule, error) - CreateBackupTarget(ctx context.Context, arg CreateBackupTargetParams) (BackupTarget, error) - CreateFabricOrganization(ctx context.Context, arg CreateFabricOrganizationParams) (FabricOrganization, error) - CreateKey(ctx context.Context, arg CreateKeyParams) (Key, error) - CreateKeyProvider(ctx context.Context, arg CreateKeyProviderParams) (KeyProvider, error) - CreateNetwork(ctx context.Context, arg CreateNetworkParams) (Network, error) - CreateNetworkFull(ctx context.Context, arg CreateNetworkFullParams) (Network, error) + CreateBackup(ctx context.Context, arg *CreateBackupParams) (*Backup, error) + CreateBackupSchedule(ctx context.Context, arg *CreateBackupScheduleParams) (*BackupSchedule, error) + CreateBackupTarget(ctx context.Context, arg *CreateBackupTargetParams) (*BackupTarget, error) + CreateFabricOrganization(ctx context.Context, arg *CreateFabricOrganizationParams) (*FabricOrganization, error) + CreateKey(ctx context.Context, arg *CreateKeyParams) (*Key, error) + CreateKeyProvider(ctx context.Context, arg *CreateKeyProviderParams) (*KeyProvider, 
error) + CreateNetwork(ctx context.Context, arg *CreateNetworkParams) (*Network, error) + CreateNetworkFull(ctx context.Context, arg *CreateNetworkFullParams) (*Network, error) // Add queries for CRUD operations - CreateNetworkNode(ctx context.Context, arg CreateNetworkNodeParams) (NetworkNode, error) - CreateNode(ctx context.Context, arg CreateNodeParams) (Node, error) - CreateNodeEvent(ctx context.Context, arg CreateNodeEventParams) (NodeEvent, error) - CreateNotificationProvider(ctx context.Context, arg CreateNotificationProviderParams) (NotificationProvider, error) - CreateSession(ctx context.Context, arg CreateSessionParams) (Session, error) - CreateUser(ctx context.Context, arg CreateUserParams) (User, error) + CreateNetworkNode(ctx context.Context, arg *CreateNetworkNodeParams) (*NetworkNode, error) + CreateNode(ctx context.Context, arg *CreateNodeParams) (*Node, error) + CreateNodeEvent(ctx context.Context, arg *CreateNodeEventParams) (*NodeEvent, error) + CreateNotificationProvider(ctx context.Context, arg *CreateNotificationProviderParams) (*NotificationProvider, error) + CreateSession(ctx context.Context, arg *CreateSessionParams) (*Session, error) + CreateSetting(ctx context.Context, config string) (*Setting, error) + CreateUser(ctx context.Context, arg *CreateUserParams) (*User, error) DeleteBackup(ctx context.Context, id int64) error DeleteBackupSchedule(ctx context.Context, id int64) error DeleteBackupTarget(ctx context.Context, id int64) error @@ -43,104 +45,116 @@ type Querier interface { DeleteKey(ctx context.Context, id int64) error DeleteKeyProvider(ctx context.Context, id int64) error DeleteNetwork(ctx context.Context, id int64) error - DeleteNetworkNode(ctx context.Context, arg DeleteNetworkNodeParams) error + DeleteNetworkNode(ctx context.Context, arg *DeleteNetworkNodeParams) error DeleteNode(ctx context.Context, id int64) error DeleteNotificationProvider(ctx context.Context, id int64) error - DeleteOldBackups(ctx context.Context, arg 
DeleteOldBackupsParams) error + DeleteOldBackups(ctx context.Context, arg *DeleteOldBackupsParams) error + DeleteRevokedCertificate(ctx context.Context, arg *DeleteRevokedCertificateParams) error DeleteSession(ctx context.Context, sessionID string) error + DeleteSetting(ctx context.Context, id int64) error DeleteUser(ctx context.Context, id int64) error DeleteUserSessions(ctx context.Context, userID int64) error - DisableBackupSchedule(ctx context.Context, id int64) (BackupSchedule, error) - EnableBackupSchedule(ctx context.Context, id int64) (BackupSchedule, error) - GetAllKeys(ctx context.Context, arg GetAllKeysParams) ([]GetAllKeysRow, error) - GetAllNodes(ctx context.Context) ([]Node, error) - GetBackup(ctx context.Context, id int64) (Backup, error) - GetBackupSchedule(ctx context.Context, id int64) (BackupSchedule, error) - GetBackupTarget(ctx context.Context, id int64) (BackupTarget, error) - GetBackupsByDateRange(ctx context.Context, arg GetBackupsByDateRangeParams) ([]Backup, error) - GetBackupsByScheduleAndStatus(ctx context.Context, arg GetBackupsByScheduleAndStatusParams) ([]Backup, error) - GetBackupsByStatus(ctx context.Context, status string) ([]Backup, error) - GetDefaultNotificationProvider(ctx context.Context, type_ string) (NotificationProvider, error) - GetDefaultNotificationProviderForType(ctx context.Context, notificationType interface{}) (NotificationProvider, error) - GetFabricOrganization(ctx context.Context, id int64) (FabricOrganization, error) - GetFabricOrganizationByID(ctx context.Context, id int64) (FabricOrganization, error) - GetFabricOrganizationByMSPID(ctx context.Context, mspID string) (FabricOrganization, error) - GetFabricOrganizationByMspID(ctx context.Context, mspID string) (GetFabricOrganizationByMspIDRow, error) - GetFabricOrganizationWithKeys(ctx context.Context, id int64) (GetFabricOrganizationWithKeysRow, error) - GetKey(ctx context.Context, id int64) (GetKeyRow, error) - GetKeyByEthereumAddress(ctx context.Context, 
ethereumAddress sql.NullString) (GetKeyByEthereumAddressRow, error) - GetKeyByID(ctx context.Context, id int64) (GetKeyByIDRow, error) + DisableBackupSchedule(ctx context.Context, id int64) (*BackupSchedule, error) + EnableBackupSchedule(ctx context.Context, id int64) (*BackupSchedule, error) + GetAllKeys(ctx context.Context, arg *GetAllKeysParams) ([]*GetAllKeysRow, error) + GetAllNodes(ctx context.Context) ([]*Node, error) + GetBackup(ctx context.Context, id int64) (*Backup, error) + GetBackupSchedule(ctx context.Context, id int64) (*BackupSchedule, error) + GetBackupTarget(ctx context.Context, id int64) (*BackupTarget, error) + GetBackupsByDateRange(ctx context.Context, arg *GetBackupsByDateRangeParams) ([]*Backup, error) + GetBackupsByScheduleAndStatus(ctx context.Context, arg *GetBackupsByScheduleAndStatusParams) ([]*Backup, error) + GetBackupsByStatus(ctx context.Context, status string) ([]*Backup, error) + GetDefaultNotificationProvider(ctx context.Context, type_ string) (*NotificationProvider, error) + GetDefaultNotificationProviderForType(ctx context.Context, notificationType interface{}) (*NotificationProvider, error) + GetFabricOrganization(ctx context.Context, id int64) (*FabricOrganization, error) + GetFabricOrganizationByID(ctx context.Context, id int64) (*FabricOrganization, error) + GetFabricOrganizationByMSPID(ctx context.Context, mspID string) (*FabricOrganization, error) + GetFabricOrganizationByMspID(ctx context.Context, mspID string) (*GetFabricOrganizationByMspIDRow, error) + GetFabricOrganizationWithKeys(ctx context.Context, id int64) (*GetFabricOrganizationWithKeysRow, error) + GetKey(ctx context.Context, id int64) (*GetKeyRow, error) + GetKeyByEthereumAddress(ctx context.Context, ethereumAddress sql.NullString) (*GetKeyByEthereumAddressRow, error) + GetKeyByID(ctx context.Context, id int64) (*GetKeyByIDRow, error) GetKeyCountByProvider(ctx context.Context, providerID int64) (int64, error) - GetKeyProvider(ctx context.Context, id int64) 
(KeyProvider, error) - GetKeyProviderByDefault(ctx context.Context) (KeyProvider, error) - GetKeyProviderByID(ctx context.Context, id int64) (KeyProvider, error) - GetKeysByAlgorithm(ctx context.Context, algorithm string) ([]Key, error) - GetKeysByProviderAndCurve(ctx context.Context, arg GetKeysByProviderAndCurveParams) ([]Key, error) + GetKeyProvider(ctx context.Context, id int64) (*KeyProvider, error) + GetKeyProviderByDefault(ctx context.Context) (*KeyProvider, error) + GetKeyProviderByID(ctx context.Context, id int64) (*KeyProvider, error) + GetKeysByFilter(ctx context.Context, arg *GetKeysByFilterParams) ([]*GetKeysByFilterRow, error) GetKeysCount(ctx context.Context) (int64, error) - GetLatestNodeEvent(ctx context.Context, nodeID int64) (NodeEvent, error) - GetNetwork(ctx context.Context, id int64) (Network, error) - GetNetworkByName(ctx context.Context, name string) (Network, error) - GetNetworkByNetworkId(ctx context.Context, networkID sql.NullString) (Network, error) + GetLatestNodeEvent(ctx context.Context, nodeID int64) (*NodeEvent, error) + GetNetwork(ctx context.Context, id int64) (*Network, error) + GetNetworkByName(ctx context.Context, name string) (*Network, error) + GetNetworkByNetworkId(ctx context.Context, networkID sql.NullString) (*Network, error) GetNetworkCurrentConfigBlock(ctx context.Context, id int64) (sql.NullString, error) - GetNetworkNode(ctx context.Context, arg GetNetworkNodeParams) (NetworkNode, error) - GetNetworkNodes(ctx context.Context, networkID int64) ([]GetNetworkNodesRow, error) - GetNode(ctx context.Context, id int64) (Node, error) - GetNodeBySlug(ctx context.Context, slug string) (Node, error) - GetNodeEvent(ctx context.Context, id int64) (NodeEvent, error) - GetNotificationProvider(ctx context.Context, id int64) (NotificationProvider, error) - GetOldestBackupByTarget(ctx context.Context, targetID int64) (Backup, error) - GetOrdererPorts(ctx context.Context) ([]GetOrdererPortsRow, error) - GetPeerPorts(ctx context.Context) 
([]GetPeerPortsRow, error) - GetProvidersByNotificationType(ctx context.Context, arg GetProvidersByNotificationTypeParams) ([]NotificationProvider, error) - GetRecentCompletedBackups(ctx context.Context) ([]Backup, error) - GetSession(ctx context.Context, sessionID string) (GetSessionRow, error) - GetUser(ctx context.Context, id int64) (User, error) - GetUserByUsername(ctx context.Context, username string) (User, error) - ListBackupSchedules(ctx context.Context) ([]BackupSchedule, error) - ListBackupTargets(ctx context.Context) ([]BackupTarget, error) - ListBackups(ctx context.Context, arg ListBackupsParams) ([]Backup, error) - ListBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) ([]Backup, error) - ListBackupsByTarget(ctx context.Context, targetID int64) ([]Backup, error) - ListFabricOrganizations(ctx context.Context) ([]FabricOrganization, error) - ListFabricOrganizationsWithKeys(ctx context.Context) ([]ListFabricOrganizationsWithKeysRow, error) - ListKeyProviders(ctx context.Context) ([]KeyProvider, error) - ListKeys(ctx context.Context, arg ListKeysParams) ([]ListKeysRow, error) - ListNetworkNodesByNetwork(ctx context.Context, networkID int64) ([]NetworkNode, error) - ListNetworkNodesByNode(ctx context.Context, nodeID int64) ([]NetworkNode, error) - ListNetworks(ctx context.Context) ([]Network, error) - ListNodeEvents(ctx context.Context, arg ListNodeEventsParams) ([]NodeEvent, error) - ListNodeEventsByType(ctx context.Context, arg ListNodeEventsByTypeParams) ([]NodeEvent, error) - ListNodes(ctx context.Context, arg ListNodesParams) ([]Node, error) - ListNodesByNetwork(ctx context.Context, arg ListNodesByNetworkParams) ([]Node, error) - ListNodesByPlatform(ctx context.Context, arg ListNodesByPlatformParams) ([]Node, error) - ListNotificationProviders(ctx context.Context) ([]NotificationProvider, error) - ListUsers(ctx context.Context) ([]User, error) + GetNetworkNode(ctx context.Context, arg *GetNetworkNodeParams) (*NetworkNode, error) + 
GetNetworkNodes(ctx context.Context, networkID int64) ([]*GetNetworkNodesRow, error) + GetNode(ctx context.Context, id int64) (*Node, error) + GetNodeBySlug(ctx context.Context, slug string) (*Node, error) + GetNodeEvent(ctx context.Context, id int64) (*NodeEvent, error) + GetNotificationProvider(ctx context.Context, id int64) (*NotificationProvider, error) + GetOldestBackupByTarget(ctx context.Context, targetID int64) (*Backup, error) + GetOrdererPorts(ctx context.Context) ([]*GetOrdererPortsRow, error) + GetOrganizationCRLInfo(ctx context.Context, id int64) (*GetOrganizationCRLInfoRow, error) + GetPeerPorts(ctx context.Context) ([]*GetPeerPortsRow, error) + GetProvidersByNotificationType(ctx context.Context, arg *GetProvidersByNotificationTypeParams) ([]*NotificationProvider, error) + GetRecentCompletedBackups(ctx context.Context) ([]*Backup, error) + GetRevokedCertificate(ctx context.Context, arg *GetRevokedCertificateParams) (*FabricRevokedCertificate, error) + GetRevokedCertificateCount(ctx context.Context, fabricOrganizationID int64) (int64, error) + GetRevokedCertificates(ctx context.Context, fabricOrganizationID int64) ([]*FabricRevokedCertificate, error) + GetSession(ctx context.Context, sessionID string) (*GetSessionRow, error) + GetSetting(ctx context.Context, id int64) (*Setting, error) + GetUser(ctx context.Context, id int64) (*User, error) + GetUserByUsername(ctx context.Context, username string) (*User, error) + ListBackupSchedules(ctx context.Context) ([]*BackupSchedule, error) + ListBackupTargets(ctx context.Context) ([]*BackupTarget, error) + ListBackups(ctx context.Context, arg *ListBackupsParams) ([]*Backup, error) + ListBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) ([]*Backup, error) + ListBackupsByTarget(ctx context.Context, targetID int64) ([]*Backup, error) + ListFabricOrganizations(ctx context.Context) ([]*FabricOrganization, error) + ListFabricOrganizationsWithKeys(ctx context.Context) 
([]*ListFabricOrganizationsWithKeysRow, error) + ListKeyProviders(ctx context.Context) ([]*KeyProvider, error) + ListKeys(ctx context.Context, arg *ListKeysParams) ([]*ListKeysRow, error) + ListNetworkNodesByNetwork(ctx context.Context, networkID int64) ([]*NetworkNode, error) + ListNetworkNodesByNode(ctx context.Context, nodeID int64) ([]*NetworkNode, error) + ListNetworks(ctx context.Context) ([]*Network, error) + ListNodeEvents(ctx context.Context, arg *ListNodeEventsParams) ([]*NodeEvent, error) + ListNodeEventsByType(ctx context.Context, arg *ListNodeEventsByTypeParams) ([]*NodeEvent, error) + ListNodes(ctx context.Context, arg *ListNodesParams) ([]*Node, error) + ListNodesByNetwork(ctx context.Context, arg *ListNodesByNetworkParams) ([]*Node, error) + ListNodesByPlatform(ctx context.Context, arg *ListNodesByPlatformParams) ([]*Node, error) + ListNotificationProviders(ctx context.Context) ([]*NotificationProvider, error) + ListSettings(ctx context.Context) ([]*Setting, error) + ListUsers(ctx context.Context) ([]*User, error) MarkBackupNotified(ctx context.Context, id int64) error UnsetDefaultNotificationProvider(ctx context.Context, type_ string) error UnsetDefaultProvider(ctx context.Context) error - UpdateBackupCompleted(ctx context.Context, arg UpdateBackupCompletedParams) (Backup, error) - UpdateBackupFailed(ctx context.Context, arg UpdateBackupFailedParams) (Backup, error) - UpdateBackupSchedule(ctx context.Context, arg UpdateBackupScheduleParams) (BackupSchedule, error) - UpdateBackupScheduleLastRun(ctx context.Context, arg UpdateBackupScheduleLastRunParams) (BackupSchedule, error) - UpdateBackupSize(ctx context.Context, arg UpdateBackupSizeParams) (Backup, error) - UpdateBackupStatus(ctx context.Context, arg UpdateBackupStatusParams) (Backup, error) - UpdateBackupTarget(ctx context.Context, arg UpdateBackupTargetParams) (BackupTarget, error) - UpdateFabricOrganization(ctx context.Context, arg UpdateFabricOrganizationParams) (FabricOrganization, error) - 
UpdateKey(ctx context.Context, arg UpdateKeyParams) (Key, error) - UpdateKeyProvider(ctx context.Context, arg UpdateKeyProviderParams) (KeyProvider, error) - UpdateNetworkCurrentConfigBlock(ctx context.Context, arg UpdateNetworkCurrentConfigBlockParams) error - UpdateNetworkGenesisBlock(ctx context.Context, arg UpdateNetworkGenesisBlockParams) (Network, error) - UpdateNetworkNodeRole(ctx context.Context, arg UpdateNetworkNodeRoleParams) (NetworkNode, error) - UpdateNetworkNodeStatus(ctx context.Context, arg UpdateNetworkNodeStatusParams) (NetworkNode, error) - UpdateNetworkStatus(ctx context.Context, arg UpdateNetworkStatusParams) error - UpdateNodeDeploymentConfig(ctx context.Context, arg UpdateNodeDeploymentConfigParams) (Node, error) - UpdateNodeEndpoint(ctx context.Context, arg UpdateNodeEndpointParams) (Node, error) - UpdateNodePublicEndpoint(ctx context.Context, arg UpdateNodePublicEndpointParams) (Node, error) - UpdateNodeStatus(ctx context.Context, arg UpdateNodeStatusParams) (Node, error) - UpdateNotificationProvider(ctx context.Context, arg UpdateNotificationProviderParams) (NotificationProvider, error) - UpdateProviderTestResults(ctx context.Context, arg UpdateProviderTestResultsParams) (NotificationProvider, error) - UpdateUser(ctx context.Context, arg UpdateUserParams) (User, error) - UpdateUserLastLogin(ctx context.Context, id int64) (User, error) + UpdateBackupCompleted(ctx context.Context, arg *UpdateBackupCompletedParams) (*Backup, error) + UpdateBackupFailed(ctx context.Context, arg *UpdateBackupFailedParams) (*Backup, error) + UpdateBackupSchedule(ctx context.Context, arg *UpdateBackupScheduleParams) (*BackupSchedule, error) + UpdateBackupScheduleLastRun(ctx context.Context, arg *UpdateBackupScheduleLastRunParams) (*BackupSchedule, error) + UpdateBackupSize(ctx context.Context, arg *UpdateBackupSizeParams) (*Backup, error) + UpdateBackupStatus(ctx context.Context, arg *UpdateBackupStatusParams) (*Backup, error) + UpdateBackupTarget(ctx 
context.Context, arg *UpdateBackupTargetParams) (*BackupTarget, error) + UpdateDeploymentConfig(ctx context.Context, arg *UpdateDeploymentConfigParams) (*Node, error) + UpdateFabricOrganization(ctx context.Context, arg *UpdateFabricOrganizationParams) (*FabricOrganization, error) + UpdateKey(ctx context.Context, arg *UpdateKeyParams) (*Key, error) + UpdateKeyProvider(ctx context.Context, arg *UpdateKeyProviderParams) (*KeyProvider, error) + UpdateNetworkCurrentConfigBlock(ctx context.Context, arg *UpdateNetworkCurrentConfigBlockParams) error + UpdateNetworkGenesisBlock(ctx context.Context, arg *UpdateNetworkGenesisBlockParams) (*Network, error) + UpdateNetworkNodeRole(ctx context.Context, arg *UpdateNetworkNodeRoleParams) (*NetworkNode, error) + UpdateNetworkNodeStatus(ctx context.Context, arg *UpdateNetworkNodeStatusParams) (*NetworkNode, error) + UpdateNetworkStatus(ctx context.Context, arg *UpdateNetworkStatusParams) error + UpdateNodeConfig(ctx context.Context, arg *UpdateNodeConfigParams) (*Node, error) + UpdateNodeDeploymentConfig(ctx context.Context, arg *UpdateNodeDeploymentConfigParams) (*Node, error) + UpdateNodeEndpoint(ctx context.Context, arg *UpdateNodeEndpointParams) (*Node, error) + UpdateNodePublicEndpoint(ctx context.Context, arg *UpdateNodePublicEndpointParams) (*Node, error) + UpdateNodeStatus(ctx context.Context, arg *UpdateNodeStatusParams) (*Node, error) + UpdateNodeStatusWithError(ctx context.Context, arg *UpdateNodeStatusWithErrorParams) (*Node, error) + UpdateNotificationProvider(ctx context.Context, arg *UpdateNotificationProviderParams) (*NotificationProvider, error) + UpdateOrganizationCRL(ctx context.Context, arg *UpdateOrganizationCRLParams) error + UpdateProviderTestResults(ctx context.Context, arg *UpdateProviderTestResultsParams) (*NotificationProvider, error) + UpdateSetting(ctx context.Context, arg *UpdateSettingParams) (*Setting, error) + UpdateUser(ctx context.Context, arg *UpdateUserParams) (*User, error) + 
UpdateUserLastLogin(ctx context.Context, id int64) (*User, error) } var _ Querier = (*Queries)(nil) diff --git a/pkg/db/queries.sql b/pkg/db/queries.sql index 953ea94..cbbfb91 100644 --- a/pkg/db/queries.sql +++ b/pkg/db/queries.sql @@ -207,7 +207,8 @@ SET name = ?, provider_id = ?, user_id = ?, ethereum_address = ?, - updated_at = CURRENT_TIMESTAMP + updated_at = CURRENT_TIMESTAMP, + signing_key_id = ? WHERE id = ? RETURNING *; @@ -267,9 +268,33 @@ WHERE id = ? RETURNING *; +-- name: UpdateNodeConfig :one +UPDATE nodes +SET node_config = ?, + updated_at = CURRENT_TIMESTAMP +WHERE id = ? +RETURNING *; + +-- name: UpdateDeploymentConfig :one +UPDATE nodes +SET deployment_config = ?, + updated_at = CURRENT_TIMESTAMP +WHERE id = ? +RETURNING *; + + +-- name: UpdateNodeStatusWithError :one +UPDATE nodes +SET status = ?, + error_message = ?, + updated_at = CURRENT_TIMESTAMP +WHERE id = ? +RETURNING *; + -- name: UpdateNodeStatus :one UPDATE nodes SET status = ?, + error_message = NULL, updated_at = CURRENT_TIMESTAMP WHERE id = ? RETURNING *; @@ -485,12 +510,13 @@ FROM keys k JOIN key_providers kp ON k.provider_id = kp.id WHERE k.ethereum_address = ?; --- name: GetKeysByAlgorithm :many -SELECT * FROM keys WHERE algorithm = ?; - --- name: GetKeysByProviderAndCurve :many -SELECT * FROM keys WHERE provider_id = ? 
AND curve = ?; - +-- name: GetKeysByFilter :many +SELECT k.*, kp.name as provider_name, kp.type as provider_type +FROM keys k +JOIN key_providers kp ON k.provider_id = kp.id +WHERE (@algorithm_filter = '' OR k.algorithm = @algorithm) + AND (@provider_id_filter = 0 OR k.provider_id = @provider_id) + AND (@curve_filter = '' OR k.curve = @curve); -- name: UpdateNodeEndpoint :one UPDATE nodes @@ -869,4 +895,68 @@ WHERE is_default = true (:notification_type = 'NODE_DOWNTIME' AND notify_node_downtime = true) OR (:notification_type = 'S3_CONNECTION_ISSUE' AND notify_s3_connection_issue = true) ) -LIMIT 1; \ No newline at end of file +LIMIT 1; + +-- name: AddRevokedCertificate :exec +INSERT INTO fabric_revoked_certificates ( + fabric_organization_id, + serial_number, + revocation_time, + reason, + issuer_certificate_id +) VALUES (?, ?, ?, ?, ?); + +-- name: GetRevokedCertificates :many +SELECT * FROM fabric_revoked_certificates +WHERE fabric_organization_id = ? +ORDER BY revocation_time DESC; + +-- name: GetRevokedCertificate :one +SELECT * FROM fabric_revoked_certificates +WHERE fabric_organization_id = ? AND serial_number = ?; + +-- name: UpdateOrganizationCRL :exec +UPDATE fabric_organizations +SET crl_last_update = ?, + crl_key_id = ? +WHERE id = ?; + +-- name: GetOrganizationCRLInfo :one +SELECT crl_key_id, crl_last_update +FROM fabric_organizations +WHERE id = ?; + +-- name: CreateSetting :one +INSERT INTO settings ( + config +) VALUES ( + ? +) +RETURNING *; + +-- name: GetSetting :one +SELECT * FROM settings +WHERE id = ? LIMIT 1; + +-- name: ListSettings :many +SELECT * FROM settings +ORDER BY created_at DESC; + +-- name: UpdateSetting :one +UPDATE settings +SET config = ?, + updated_at = CURRENT_TIMESTAMP +WHERE id = ? +RETURNING *; + +-- name: DeleteSetting :exec +DELETE FROM settings +WHERE id = ?; + +-- name: DeleteRevokedCertificate :exec +DELETE FROM fabric_revoked_certificates +WHERE fabric_organization_id = ? 
AND serial_number = ?; + +-- name: GetRevokedCertificateCount :one +SELECT COUNT(*) FROM fabric_revoked_certificates +WHERE fabric_organization_id = ?; diff --git a/pkg/db/queries.sql.go b/pkg/db/queries.sql.go index cece1cd..bb345ea 100644 --- a/pkg/db/queries.sql.go +++ b/pkg/db/queries.sql.go @@ -11,104 +11,133 @@ import ( "time" ) -const checkNetworkNodeExists = `-- name: CheckNetworkNodeExists :one +const AddRevokedCertificate = `-- name: AddRevokedCertificate :exec +INSERT INTO fabric_revoked_certificates ( + fabric_organization_id, + serial_number, + revocation_time, + reason, + issuer_certificate_id +) VALUES (?, ?, ?, ?, ?) +` + +type AddRevokedCertificateParams struct { + FabricOrganizationID int64 `json:"fabricOrganizationId"` + SerialNumber string `json:"serialNumber"` + RevocationTime time.Time `json:"revocationTime"` + Reason int64 `json:"reason"` + IssuerCertificateID sql.NullInt64 `json:"issuerCertificateId"` +} + +func (q *Queries) AddRevokedCertificate(ctx context.Context, arg *AddRevokedCertificateParams) error { + _, err := q.db.ExecContext(ctx, AddRevokedCertificate, + arg.FabricOrganizationID, + arg.SerialNumber, + arg.RevocationTime, + arg.Reason, + arg.IssuerCertificateID, + ) + return err +} + +const CheckNetworkNodeExists = `-- name: CheckNetworkNodeExists :one SELECT EXISTS(SELECT 1 FROM network_nodes WHERE network_id = ? AND node_id = ?) 
` type CheckNetworkNodeExistsParams struct { - NetworkID int64 `json:"network_id"` - NodeID int64 `json:"node_id"` + NetworkID int64 `json:"networkId"` + NodeID int64 `json:"nodeId"` } -func (q *Queries) CheckNetworkNodeExists(ctx context.Context, arg CheckNetworkNodeExistsParams) (int64, error) { - row := q.queryRow(ctx, q.checkNetworkNodeExistsStmt, checkNetworkNodeExists, arg.NetworkID, arg.NodeID) +func (q *Queries) CheckNetworkNodeExists(ctx context.Context, arg *CheckNetworkNodeExistsParams) (int64, error) { + row := q.db.QueryRowContext(ctx, CheckNetworkNodeExists, arg.NetworkID, arg.NodeID) var column_1 int64 err := row.Scan(&column_1) return column_1, err } -const countBackupsBySchedule = `-- name: CountBackupsBySchedule :one +const CountBackupsBySchedule = `-- name: CountBackupsBySchedule :one SELECT COUNT(*) FROM backups WHERE schedule_id = ? ` func (q *Queries) CountBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) (int64, error) { - row := q.queryRow(ctx, q.countBackupsByScheduleStmt, countBackupsBySchedule, scheduleID) + row := q.db.QueryRowContext(ctx, CountBackupsBySchedule, scheduleID) var count int64 err := row.Scan(&count) return count, err } -const countBackupsByTarget = `-- name: CountBackupsByTarget :one +const CountBackupsByTarget = `-- name: CountBackupsByTarget :one SELECT COUNT(*) FROM backups WHERE target_id = ? 
` func (q *Queries) CountBackupsByTarget(ctx context.Context, targetID int64) (int64, error) { - row := q.queryRow(ctx, q.countBackupsByTargetStmt, countBackupsByTarget, targetID) + row := q.db.QueryRowContext(ctx, CountBackupsByTarget, targetID) var count int64 err := row.Scan(&count) return count, err } -const countNetworks = `-- name: CountNetworks :one +const CountNetworks = `-- name: CountNetworks :one SELECT COUNT(*) FROM networks ` func (q *Queries) CountNetworks(ctx context.Context) (int64, error) { - row := q.queryRow(ctx, q.countNetworksStmt, countNetworks) + row := q.db.QueryRowContext(ctx, CountNetworks) var count int64 err := row.Scan(&count) return count, err } -const countNodeEvents = `-- name: CountNodeEvents :one +const CountNodeEvents = `-- name: CountNodeEvents :one SELECT COUNT(*) FROM node_events WHERE node_id = ? ` func (q *Queries) CountNodeEvents(ctx context.Context, nodeID int64) (int64, error) { - row := q.queryRow(ctx, q.countNodeEventsStmt, countNodeEvents, nodeID) + row := q.db.QueryRowContext(ctx, CountNodeEvents, nodeID) var count int64 err := row.Scan(&count) return count, err } -const countNodes = `-- name: CountNodes :one +const CountNodes = `-- name: CountNodes :one SELECT COUNT(*) FROM nodes ` func (q *Queries) CountNodes(ctx context.Context) (int64, error) { - row := q.queryRow(ctx, q.countNodesStmt, countNodes) + row := q.db.QueryRowContext(ctx, CountNodes) var count int64 err := row.Scan(&count) return count, err } -const countNodesByPlatform = `-- name: CountNodesByPlatform :one +const CountNodesByPlatform = `-- name: CountNodesByPlatform :one SELECT COUNT(*) FROM nodes WHERE platform = ? 
` func (q *Queries) CountNodesByPlatform(ctx context.Context, platform string) (int64, error) { - row := q.queryRow(ctx, q.countNodesByPlatformStmt, countNodesByPlatform, platform) + row := q.db.QueryRowContext(ctx, CountNodesByPlatform, platform) var count int64 err := row.Scan(&count) return count, err } -const countUsers = `-- name: CountUsers :one +const CountUsers = `-- name: CountUsers :one SELECT COUNT(*) FROM users ` func (q *Queries) CountUsers(ctx context.Context) (int64, error) { - row := q.queryRow(ctx, q.countUsersStmt, countUsers) + row := q.db.QueryRowContext(ctx, CountUsers) var count int64 err := row.Scan(&count) return count, err } -const createBackup = `-- name: CreateBackup :one +const CreateBackup = `-- name: CreateBackup :one INSERT INTO backups ( schedule_id, target_id, @@ -125,14 +154,14 @@ INSERT INTO backups ( ` type CreateBackupParams struct { - ScheduleID sql.NullInt64 `json:"schedule_id"` - TargetID int64 `json:"target_id"` + ScheduleID sql.NullInt64 `json:"scheduleId"` + TargetID int64 `json:"targetId"` Status string `json:"status"` - StartedAt time.Time `json:"started_at"` + StartedAt time.Time `json:"startedAt"` } -func (q *Queries) CreateBackup(ctx context.Context, arg CreateBackupParams) (Backup, error) { - row := q.queryRow(ctx, q.createBackupStmt, createBackup, +func (q *Queries) CreateBackup(ctx context.Context, arg *CreateBackupParams) (*Backup, error) { + row := q.db.QueryRowContext(ctx, CreateBackup, arg.ScheduleID, arg.TargetID, arg.Status, @@ -151,10 +180,10 @@ func (q *Queries) CreateBackup(ctx context.Context, arg CreateBackupParams) (Bac &i.CreatedAt, &i.NotificationSent, ) - return i, err + return &i, err } -const createBackupSchedule = `-- name: CreateBackupSchedule :one +const CreateBackupSchedule = `-- name: CreateBackupSchedule :one INSERT INTO backup_schedules ( name, description, @@ -179,14 +208,14 @@ INSERT INTO backup_schedules ( type CreateBackupScheduleParams struct { Name string `json:"name"` Description 
sql.NullString `json:"description"` - CronExpression string `json:"cron_expression"` - TargetID int64 `json:"target_id"` - RetentionDays int64 `json:"retention_days"` + CronExpression string `json:"cronExpression"` + TargetID int64 `json:"targetId"` + RetentionDays int64 `json:"retentionDays"` Enabled bool `json:"enabled"` } -func (q *Queries) CreateBackupSchedule(ctx context.Context, arg CreateBackupScheduleParams) (BackupSchedule, error) { - row := q.queryRow(ctx, q.createBackupScheduleStmt, createBackupSchedule, +func (q *Queries) CreateBackupSchedule(ctx context.Context, arg *CreateBackupScheduleParams) (*BackupSchedule, error) { + row := q.db.QueryRowContext(ctx, CreateBackupSchedule, arg.Name, arg.Description, arg.CronExpression, @@ -208,10 +237,10 @@ func (q *Queries) CreateBackupSchedule(ctx context.Context, arg CreateBackupSche &i.LastRunAt, &i.NextRunAt, ) - return i, err + return &i, err } -const createBackupTarget = `-- name: CreateBackupTarget :one +const CreateBackupTarget = `-- name: CreateBackupTarget :one INSERT INTO backup_targets ( name, type, @@ -244,18 +273,18 @@ INSERT INTO backup_targets ( type CreateBackupTargetParams struct { Name string `json:"name"` Type string `json:"type"` - BucketName sql.NullString `json:"bucket_name"` + BucketName sql.NullString `json:"bucketName"` Region sql.NullString `json:"region"` Endpoint sql.NullString `json:"endpoint"` - BucketPath sql.NullString `json:"bucket_path"` - AccessKeyID sql.NullString `json:"access_key_id"` - SecretKey sql.NullString `json:"secret_key"` - S3PathStyle sql.NullBool `json:"s3_path_style"` - ResticPassword sql.NullString `json:"restic_password"` + BucketPath sql.NullString `json:"bucketPath"` + AccessKeyID sql.NullString `json:"accessKeyId"` + SecretKey sql.NullString `json:"secretKey"` + S3PathStyle sql.NullBool `json:"s3PathStyle"` + ResticPassword sql.NullString `json:"resticPassword"` } -func (q *Queries) CreateBackupTarget(ctx context.Context, arg CreateBackupTargetParams) 
(BackupTarget, error) { - row := q.queryRow(ctx, q.createBackupTargetStmt, createBackupTarget, +func (q *Queries) CreateBackupTarget(ctx context.Context, arg *CreateBackupTargetParams) (*BackupTarget, error) { + row := q.db.QueryRowContext(ctx, CreateBackupTarget, arg.Name, arg.Type, arg.BucketName, @@ -283,10 +312,10 @@ func (q *Queries) CreateBackupTarget(ctx context.Context, arg CreateBackupTarget &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const createFabricOrganization = `-- name: CreateFabricOrganization :one +const CreateFabricOrganization = `-- name: CreateFabricOrganization :one INSERT INTO fabric_organizations ( msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, provider_id, created_by, @@ -294,25 +323,25 @@ INSERT INTO fabric_organizations ( ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) -RETURNING id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at +RETURNING id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at, crl_key_id, crl_last_update ` type CreateFabricOrganizationParams struct { - MspID string `json:"msp_id"` + MspID string `json:"mspId"` Description sql.NullString `json:"description"` Config sql.NullString `json:"config"` - CaConfig sql.NullString `json:"ca_config"` - SignKeyID sql.NullInt64 `json:"sign_key_id"` - TlsRootKeyID sql.NullInt64 `json:"tls_root_key_id"` - ProviderID sql.NullInt64 `json:"provider_id"` - CreatedBy sql.NullInt64 `json:"created_by"` - AdminTlsKeyID sql.NullInt64 `json:"admin_tls_key_id"` - AdminSignKeyID sql.NullInt64 `json:"admin_sign_key_id"` - ClientSignKeyID sql.NullInt64 `json:"client_sign_key_id"` -} - -func (q *Queries) CreateFabricOrganization(ctx context.Context, arg CreateFabricOrganizationParams) (FabricOrganization, error) 
{ - row := q.queryRow(ctx, q.createFabricOrganizationStmt, createFabricOrganization, + CaConfig sql.NullString `json:"caConfig"` + SignKeyID sql.NullInt64 `json:"signKeyId"` + TlsRootKeyID sql.NullInt64 `json:"tlsRootKeyId"` + ProviderID sql.NullInt64 `json:"providerId"` + CreatedBy sql.NullInt64 `json:"createdBy"` + AdminTlsKeyID sql.NullInt64 `json:"adminTlsKeyId"` + AdminSignKeyID sql.NullInt64 `json:"adminSignKeyId"` + ClientSignKeyID sql.NullInt64 `json:"clientSignKeyId"` +} + +func (q *Queries) CreateFabricOrganization(ctx context.Context, arg *CreateFabricOrganizationParams) (*FabricOrganization, error) { + row := q.db.QueryRowContext(ctx, CreateFabricOrganization, arg.MspID, arg.Description, arg.Config, @@ -341,11 +370,13 @@ func (q *Queries) CreateFabricOrganization(ctx context.Context, arg CreateFabric &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + &i.CrlKeyID, + &i.CrlLastUpdate, ) - return i, err + return &i, err } -const createKey = `-- name: CreateKey :one +const CreateKey = `-- name: CreateKey :one INSERT INTO keys ( name, description, algorithm, key_size, curve, format, public_key, private_key, certificate, status, expires_at, sha256_fingerprint, @@ -359,24 +390,24 @@ type CreateKeyParams struct { Name string `json:"name"` Description sql.NullString `json:"description"` Algorithm string `json:"algorithm"` - KeySize sql.NullInt64 `json:"key_size"` + KeySize sql.NullInt64 `json:"keySize"` Curve sql.NullString `json:"curve"` Format string `json:"format"` - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` Certificate sql.NullString `json:"certificate"` Status string `json:"status"` - ExpiresAt sql.NullTime `json:"expires_at"` - Sha256Fingerprint string `json:"sha256_fingerprint"` - Sha1Fingerprint string `json:"sha1_fingerprint"` - ProviderID int64 `json:"provider_id"` - UserID int64 `json:"user_id"` - IsCa int64 `json:"is_ca"` - EthereumAddress 
sql.NullString `json:"ethereum_address"` + ExpiresAt sql.NullTime `json:"expiresAt"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + IsCa int64 `json:"isCa"` + EthereumAddress sql.NullString `json:"ethereumAddress"` } -func (q *Queries) CreateKey(ctx context.Context, arg CreateKeyParams) (Key, error) { - row := q.queryRow(ctx, q.createKeyStmt, createKey, +func (q *Queries) CreateKey(ctx context.Context, arg *CreateKeyParams) (*Key, error) { + row := q.db.QueryRowContext(ctx, CreateKey, arg.Name, arg.Description, arg.Algorithm, @@ -420,10 +451,10 @@ func (q *Queries) CreateKey(ctx context.Context, arg CreateKeyParams) (Key, erro &i.IsCa, &i.EthereumAddress, ) - return i, err + return &i, err } -const createKeyProvider = `-- name: CreateKeyProvider :one +const CreateKeyProvider = `-- name: CreateKeyProvider :one INSERT INTO key_providers (name, type, is_default, config) VALUES (?, ?, ?, ?) 
RETURNING id, name, type, is_default, config, created_at, updated_at @@ -432,12 +463,12 @@ RETURNING id, name, type, is_default, config, created_at, updated_at type CreateKeyProviderParams struct { Name string `json:"name"` Type string `json:"type"` - IsDefault int64 `json:"is_default"` + IsDefault int64 `json:"isDefault"` Config string `json:"config"` } -func (q *Queries) CreateKeyProvider(ctx context.Context, arg CreateKeyProviderParams) (KeyProvider, error) { - row := q.queryRow(ctx, q.createKeyProviderStmt, createKeyProvider, +func (q *Queries) CreateKeyProvider(ctx context.Context, arg *CreateKeyProviderParams) (*KeyProvider, error) { + row := q.db.QueryRowContext(ctx, CreateKeyProvider, arg.Name, arg.Type, arg.IsDefault, @@ -453,10 +484,10 @@ func (q *Queries) CreateKeyProvider(ctx context.Context, arg CreateKeyProviderPa &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const createNetwork = `-- name: CreateNetwork :one +const CreateNetwork = `-- name: CreateNetwork :one INSERT INTO networks ( name, platform, status, description, config, deployment_config, exposed_ports, domain, created_by, network_id @@ -472,15 +503,15 @@ type CreateNetworkParams struct { Status string `json:"status"` Description sql.NullString `json:"description"` Config sql.NullString `json:"config"` - DeploymentConfig sql.NullString `json:"deployment_config"` - ExposedPorts sql.NullString `json:"exposed_ports"` + DeploymentConfig sql.NullString `json:"deploymentConfig"` + ExposedPorts sql.NullString `json:"exposedPorts"` Domain sql.NullString `json:"domain"` - CreatedBy sql.NullInt64 `json:"created_by"` - NetworkID sql.NullString `json:"network_id"` + CreatedBy sql.NullInt64 `json:"createdBy"` + NetworkID sql.NullString `json:"networkId"` } -func (q *Queries) CreateNetwork(ctx context.Context, arg CreateNetworkParams) (Network, error) { - row := q.queryRow(ctx, q.createNetworkStmt, createNetwork, +func (q *Queries) CreateNetwork(ctx context.Context, arg 
*CreateNetworkParams) (*Network, error) { + row := q.db.QueryRowContext(ctx, CreateNetwork, arg.Name, arg.Platform, arg.Status, @@ -510,10 +541,10 @@ func (q *Queries) CreateNetwork(ctx context.Context, arg CreateNetworkParams) (N &i.GenesisBlockB64, &i.CurrentConfigBlockB64, ) - return i, err + return &i, err } -const createNetworkFull = `-- name: CreateNetworkFull :one +const CreateNetworkFull = `-- name: CreateNetworkFull :one INSERT INTO networks ( name, platform, status, description, config, deployment_config, exposed_ports, domain, created_by, network_id, genesis_block_b64 @@ -529,16 +560,16 @@ type CreateNetworkFullParams struct { Status string `json:"status"` Description sql.NullString `json:"description"` Config sql.NullString `json:"config"` - DeploymentConfig sql.NullString `json:"deployment_config"` - ExposedPorts sql.NullString `json:"exposed_ports"` + DeploymentConfig sql.NullString `json:"deploymentConfig"` + ExposedPorts sql.NullString `json:"exposedPorts"` Domain sql.NullString `json:"domain"` - CreatedBy sql.NullInt64 `json:"created_by"` - NetworkID sql.NullString `json:"network_id"` - GenesisBlockB64 sql.NullString `json:"genesis_block_b64"` + CreatedBy sql.NullInt64 `json:"createdBy"` + NetworkID sql.NullString `json:"networkId"` + GenesisBlockB64 sql.NullString `json:"genesisBlockB64"` } -func (q *Queries) CreateNetworkFull(ctx context.Context, arg CreateNetworkFullParams) (Network, error) { - row := q.queryRow(ctx, q.createNetworkFullStmt, createNetworkFull, +func (q *Queries) CreateNetworkFull(ctx context.Context, arg *CreateNetworkFullParams) (*Network, error) { + row := q.db.QueryRowContext(ctx, CreateNetworkFull, arg.Name, arg.Platform, arg.Status, @@ -569,10 +600,10 @@ func (q *Queries) CreateNetworkFull(ctx context.Context, arg CreateNetworkFullPa &i.GenesisBlockB64, &i.CurrentConfigBlockB64, ) - return i, err + return &i, err } -const createNetworkNode = `-- name: CreateNetworkNode :one +const CreateNetworkNode = `-- name: 
CreateNetworkNode :one INSERT INTO network_nodes ( network_id, node_id, @@ -584,15 +615,15 @@ INSERT INTO network_nodes ( ` type CreateNetworkNodeParams struct { - NetworkID int64 `json:"network_id"` - NodeID int64 `json:"node_id"` + NetworkID int64 `json:"networkId"` + NodeID int64 `json:"nodeId"` Status string `json:"status"` Role string `json:"role"` } // Add queries for CRUD operations -func (q *Queries) CreateNetworkNode(ctx context.Context, arg CreateNetworkNodeParams) (NetworkNode, error) { - row := q.queryRow(ctx, q.createNetworkNodeStmt, createNetworkNode, +func (q *Queries) CreateNetworkNode(ctx context.Context, arg *CreateNetworkNodeParams) (*NetworkNode, error) { + row := q.db.QueryRowContext(ctx, CreateNetworkNode, arg.NetworkID, arg.NodeID, arg.Status, @@ -609,10 +640,10 @@ func (q *Queries) CreateNetworkNode(ctx context.Context, arg CreateNetworkNodePa &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const createNode = `-- name: CreateNode :one +const CreateNode = `-- name: CreateNode :one INSERT INTO nodes ( name, slug, @@ -649,7 +680,7 @@ INSERT INTO nodes ( ?, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP -) RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config +) RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message ` type CreateNodeParams struct { @@ -658,20 +689,20 @@ type CreateNodeParams struct { Platform string `json:"platform"` Status string `json:"status"` Description sql.NullString `json:"description"` - NetworkID sql.NullInt64 `json:"network_id"` + NetworkID sql.NullInt64 `json:"networkId"` Config sql.NullString `json:"config"` Resources sql.NullString 
`json:"resources"` Endpoint sql.NullString `json:"endpoint"` - PublicEndpoint sql.NullString `json:"public_endpoint"` - P2pAddress sql.NullString `json:"p2p_address"` - CreatedBy sql.NullInt64 `json:"created_by"` - FabricOrganizationID sql.NullInt64 `json:"fabric_organization_id"` - NodeType sql.NullString `json:"node_type"` - NodeConfig sql.NullString `json:"node_config"` + PublicEndpoint sql.NullString `json:"publicEndpoint"` + P2pAddress sql.NullString `json:"p2pAddress"` + CreatedBy sql.NullInt64 `json:"createdBy"` + FabricOrganizationID sql.NullInt64 `json:"fabricOrganizationId"` + NodeType sql.NullString `json:"nodeType"` + NodeConfig sql.NullString `json:"nodeConfig"` } -func (q *Queries) CreateNode(ctx context.Context, arg CreateNodeParams) (Node, error) { - row := q.queryRow(ctx, q.createNodeStmt, createNode, +func (q *Queries) CreateNode(ctx context.Context, arg *CreateNodeParams) (*Node, error) { + row := q.db.QueryRowContext(ctx, CreateNode, arg.Name, arg.Slug, arg.Platform, @@ -709,11 +740,12 @@ func (q *Queries) CreateNode(ctx context.Context, arg CreateNodeParams) (Node, e &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ) - return i, err + return &i, err } -const createNodeEvent = `-- name: CreateNodeEvent :one +const CreateNodeEvent = `-- name: CreateNodeEvent :one INSERT INTO node_events ( node_id, event_type, @@ -727,15 +759,15 @@ RETURNING id, node_id, event_type, description, data, status, created_at ` type CreateNodeEventParams struct { - NodeID int64 `json:"node_id"` - EventType string `json:"event_type"` + NodeID int64 `json:"nodeId"` + EventType string `json:"eventType"` Description string `json:"description"` Data sql.NullString `json:"data"` Status string `json:"status"` } -func (q *Queries) CreateNodeEvent(ctx context.Context, arg CreateNodeEventParams) (NodeEvent, error) { - row := q.queryRow(ctx, q.createNodeEventStmt, createNodeEvent, +func (q *Queries) CreateNodeEvent(ctx context.Context, arg 
*CreateNodeEventParams) (*NodeEvent, error) { + row := q.db.QueryRowContext(ctx, CreateNodeEvent, arg.NodeID, arg.EventType, arg.Description, @@ -752,10 +784,10 @@ func (q *Queries) CreateNodeEvent(ctx context.Context, arg CreateNodeEventParams &i.Status, &i.CreatedAt, ) - return i, err + return &i, err } -const createNotificationProvider = `-- name: CreateNotificationProvider :one +const CreateNotificationProvider = `-- name: CreateNotificationProvider :one INSERT INTO notification_providers ( type, name, @@ -785,15 +817,15 @@ type CreateNotificationProviderParams struct { Type string `json:"type"` Name string `json:"name"` Config string `json:"config"` - IsDefault bool `json:"is_default"` - NotifyNodeDowntime bool `json:"notify_node_downtime"` - NotifyBackupSuccess bool `json:"notify_backup_success"` - NotifyBackupFailure bool `json:"notify_backup_failure"` - NotifyS3ConnectionIssue bool `json:"notify_s3_connection_issue"` + IsDefault bool `json:"isDefault"` + NotifyNodeDowntime bool `json:"notifyNodeDowntime"` + NotifyBackupSuccess bool `json:"notifyBackupSuccess"` + NotifyBackupFailure bool `json:"notifyBackupFailure"` + NotifyS3ConnectionIssue bool `json:"notifyS3ConnectionIssue"` } -func (q *Queries) CreateNotificationProvider(ctx context.Context, arg CreateNotificationProviderParams) (NotificationProvider, error) { - row := q.queryRow(ctx, q.createNotificationProviderStmt, createNotificationProvider, +func (q *Queries) CreateNotificationProvider(ctx context.Context, arg *CreateNotificationProviderParams) (*NotificationProvider, error) { + row := q.db.QueryRowContext(ctx, CreateNotificationProvider, arg.Type, arg.Name, arg.Config, @@ -821,10 +853,10 @@ func (q *Queries) CreateNotificationProvider(ctx context.Context, arg CreateNoti &i.LastTestStatus, &i.LastTestMessage, ) - return i, err + return &i, err } -const createSession = `-- name: CreateSession :one +const CreateSession = `-- name: CreateSession :one INSERT INTO sessions ( session_id, user_id, @@ 
-839,14 +871,14 @@ INSERT INTO sessions ( ` type CreateSessionParams struct { - SessionID string `json:"session_id"` - UserID int64 `json:"user_id"` + SessionID string `json:"sessionId"` + UserID int64 `json:"userId"` Token string `json:"token"` - ExpiresAt time.Time `json:"expires_at"` + ExpiresAt time.Time `json:"expiresAt"` } -func (q *Queries) CreateSession(ctx context.Context, arg CreateSessionParams) (Session, error) { - row := q.queryRow(ctx, q.createSessionStmt, createSession, +func (q *Queries) CreateSession(ctx context.Context, arg *CreateSessionParams) (*Session, error) { + row := q.db.QueryRowContext(ctx, CreateSession, arg.SessionID, arg.UserID, arg.Token, @@ -865,10 +897,31 @@ func (q *Queries) CreateSession(ctx context.Context, arg CreateSessionParams) (S &i.ExpiresAt, &i.LastActivityAt, ) - return i, err + return &i, err } -const createUser = `-- name: CreateUser :one +const CreateSetting = `-- name: CreateSetting :one +INSERT INTO settings ( + config +) VALUES ( + ? +) +RETURNING id, config, created_at, updated_at +` + +func (q *Queries) CreateSetting(ctx context.Context, config string) (*Setting, error) { + row := q.db.QueryRowContext(ctx, CreateSetting, config) + var i Setting + err := row.Scan( + &i.ID, + &i.Config, + &i.CreatedAt, + &i.UpdatedAt, + ) + return &i, err +} + +const CreateUser = `-- name: CreateUser :one INSERT INTO users ( username, password, created_at, last_login_at, updated_at ) VALUES ( @@ -882,8 +935,8 @@ type CreateUserParams struct { Password string `json:"password"` } -func (q *Queries) CreateUser(ctx context.Context, arg CreateUserParams) (User, error) { - row := q.queryRow(ctx, q.createUserStmt, createUser, arg.Username, arg.Password) +func (q *Queries) CreateUser(ctx context.Context, arg *CreateUserParams) (*User, error) { + row := q.db.QueryRowContext(ctx, CreateUser, arg.Username, arg.Password) var i User err := row.Scan( &i.ID, @@ -899,184 +952,209 @@ func (q *Queries) CreateUser(ctx context.Context, arg 
CreateUserParams) (User, e &i.LastLoginAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const deleteBackup = `-- name: DeleteBackup :exec +const DeleteBackup = `-- name: DeleteBackup :exec DELETE FROM backups WHERE id = ? ` func (q *Queries) DeleteBackup(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteBackupStmt, deleteBackup, id) + _, err := q.db.ExecContext(ctx, DeleteBackup, id) return err } -const deleteBackupSchedule = `-- name: DeleteBackupSchedule :exec +const DeleteBackupSchedule = `-- name: DeleteBackupSchedule :exec DELETE FROM backup_schedules WHERE id = ? ` func (q *Queries) DeleteBackupSchedule(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteBackupScheduleStmt, deleteBackupSchedule, id) + _, err := q.db.ExecContext(ctx, DeleteBackupSchedule, id) return err } -const deleteBackupTarget = `-- name: DeleteBackupTarget :exec +const DeleteBackupTarget = `-- name: DeleteBackupTarget :exec DELETE FROM backup_targets WHERE id = ? ` func (q *Queries) DeleteBackupTarget(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteBackupTargetStmt, deleteBackupTarget, id) + _, err := q.db.ExecContext(ctx, DeleteBackupTarget, id) return err } -const deleteBackupsBySchedule = `-- name: DeleteBackupsBySchedule :exec +const DeleteBackupsBySchedule = `-- name: DeleteBackupsBySchedule :exec DELETE FROM backups WHERE schedule_id = ? ` func (q *Queries) DeleteBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) error { - _, err := q.exec(ctx, q.deleteBackupsByScheduleStmt, deleteBackupsBySchedule, scheduleID) + _, err := q.db.ExecContext(ctx, DeleteBackupsBySchedule, scheduleID) return err } -const deleteBackupsByTarget = `-- name: DeleteBackupsByTarget :exec +const DeleteBackupsByTarget = `-- name: DeleteBackupsByTarget :exec DELETE FROM backups WHERE target_id = ? 
` func (q *Queries) DeleteBackupsByTarget(ctx context.Context, targetID int64) error { - _, err := q.exec(ctx, q.deleteBackupsByTargetStmt, deleteBackupsByTarget, targetID) + _, err := q.db.ExecContext(ctx, DeleteBackupsByTarget, targetID) return err } -const deleteExpiredSessions = `-- name: DeleteExpiredSessions :exec +const DeleteExpiredSessions = `-- name: DeleteExpiredSessions :exec DELETE FROM sessions WHERE expires_at <= CURRENT_TIMESTAMP ` func (q *Queries) DeleteExpiredSessions(ctx context.Context) error { - _, err := q.exec(ctx, q.deleteExpiredSessionsStmt, deleteExpiredSessions) + _, err := q.db.ExecContext(ctx, DeleteExpiredSessions) return err } -const deleteFabricOrganization = `-- name: DeleteFabricOrganization :exec +const DeleteFabricOrganization = `-- name: DeleteFabricOrganization :exec DELETE FROM fabric_organizations WHERE id = ? ` func (q *Queries) DeleteFabricOrganization(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteFabricOrganizationStmt, deleteFabricOrganization, id) + _, err := q.db.ExecContext(ctx, DeleteFabricOrganization, id) return err } -const deleteKey = `-- name: DeleteKey :exec +const DeleteKey = `-- name: DeleteKey :exec DELETE FROM keys WHERE id = ? ` func (q *Queries) DeleteKey(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteKeyStmt, deleteKey, id) + _, err := q.db.ExecContext(ctx, DeleteKey, id) return err } -const deleteKeyProvider = `-- name: DeleteKeyProvider :exec +const DeleteKeyProvider = `-- name: DeleteKeyProvider :exec DELETE FROM key_providers WHERE id = ? ` func (q *Queries) DeleteKeyProvider(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteKeyProviderStmt, deleteKeyProvider, id) + _, err := q.db.ExecContext(ctx, DeleteKeyProvider, id) return err } -const deleteNetwork = `-- name: DeleteNetwork :exec +const DeleteNetwork = `-- name: DeleteNetwork :exec DELETE FROM networks WHERE id = ? 
` func (q *Queries) DeleteNetwork(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteNetworkStmt, deleteNetwork, id) + _, err := q.db.ExecContext(ctx, DeleteNetwork, id) return err } -const deleteNetworkNode = `-- name: DeleteNetworkNode :exec +const DeleteNetworkNode = `-- name: DeleteNetworkNode :exec DELETE FROM network_nodes WHERE network_id = ? AND node_id = ? ` type DeleteNetworkNodeParams struct { - NetworkID int64 `json:"network_id"` - NodeID int64 `json:"node_id"` + NetworkID int64 `json:"networkId"` + NodeID int64 `json:"nodeId"` } -func (q *Queries) DeleteNetworkNode(ctx context.Context, arg DeleteNetworkNodeParams) error { - _, err := q.exec(ctx, q.deleteNetworkNodeStmt, deleteNetworkNode, arg.NetworkID, arg.NodeID) +func (q *Queries) DeleteNetworkNode(ctx context.Context, arg *DeleteNetworkNodeParams) error { + _, err := q.db.ExecContext(ctx, DeleteNetworkNode, arg.NetworkID, arg.NodeID) return err } -const deleteNode = `-- name: DeleteNode :exec +const DeleteNode = `-- name: DeleteNode :exec DELETE FROM nodes WHERE id = ? ` func (q *Queries) DeleteNode(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteNodeStmt, deleteNode, id) + _, err := q.db.ExecContext(ctx, DeleteNode, id) return err } -const deleteNotificationProvider = `-- name: DeleteNotificationProvider :exec +const DeleteNotificationProvider = `-- name: DeleteNotificationProvider :exec DELETE FROM notification_providers WHERE id = ? ` func (q *Queries) DeleteNotificationProvider(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteNotificationProviderStmt, deleteNotificationProvider, id) + _, err := q.db.ExecContext(ctx, DeleteNotificationProvider, id) return err } -const deleteOldBackups = `-- name: DeleteOldBackups :exec +const DeleteOldBackups = `-- name: DeleteOldBackups :exec DELETE FROM backups WHERE target_id = ? AND created_at < ? 
` type DeleteOldBackupsParams struct { - TargetID int64 `json:"target_id"` - CreatedAt time.Time `json:"created_at"` + TargetID int64 `json:"targetId"` + CreatedAt time.Time `json:"createdAt"` +} + +func (q *Queries) DeleteOldBackups(ctx context.Context, arg *DeleteOldBackupsParams) error { + _, err := q.db.ExecContext(ctx, DeleteOldBackups, arg.TargetID, arg.CreatedAt) + return err +} + +const DeleteRevokedCertificate = `-- name: DeleteRevokedCertificate :exec +DELETE FROM fabric_revoked_certificates +WHERE fabric_organization_id = ? AND serial_number = ? +` + +type DeleteRevokedCertificateParams struct { + FabricOrganizationID int64 `json:"fabricOrganizationId"` + SerialNumber string `json:"serialNumber"` } -func (q *Queries) DeleteOldBackups(ctx context.Context, arg DeleteOldBackupsParams) error { - _, err := q.exec(ctx, q.deleteOldBackupsStmt, deleteOldBackups, arg.TargetID, arg.CreatedAt) +func (q *Queries) DeleteRevokedCertificate(ctx context.Context, arg *DeleteRevokedCertificateParams) error { + _, err := q.db.ExecContext(ctx, DeleteRevokedCertificate, arg.FabricOrganizationID, arg.SerialNumber) return err } -const deleteSession = `-- name: DeleteSession :exec +const DeleteSession = `-- name: DeleteSession :exec DELETE FROM sessions WHERE session_id = ? ` func (q *Queries) DeleteSession(ctx context.Context, sessionID string) error { - _, err := q.exec(ctx, q.deleteSessionStmt, deleteSession, sessionID) + _, err := q.db.ExecContext(ctx, DeleteSession, sessionID) return err } -const deleteUser = `-- name: DeleteUser :exec +const DeleteSetting = `-- name: DeleteSetting :exec +DELETE FROM settings +WHERE id = ? +` + +func (q *Queries) DeleteSetting(ctx context.Context, id int64) error { + _, err := q.db.ExecContext(ctx, DeleteSetting, id) + return err +} + +const DeleteUser = `-- name: DeleteUser :exec DELETE FROM users WHERE id = ? 
` func (q *Queries) DeleteUser(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.deleteUserStmt, deleteUser, id) + _, err := q.db.ExecContext(ctx, DeleteUser, id) return err } -const deleteUserSessions = `-- name: DeleteUserSessions :exec +const DeleteUserSessions = `-- name: DeleteUserSessions :exec DELETE FROM sessions WHERE user_id = ? ` func (q *Queries) DeleteUserSessions(ctx context.Context, userID int64) error { - _, err := q.exec(ctx, q.deleteUserSessionsStmt, deleteUserSessions, userID) + _, err := q.db.ExecContext(ctx, DeleteUserSessions, userID) return err } -const disableBackupSchedule = `-- name: DisableBackupSchedule :one +const DisableBackupSchedule = `-- name: DisableBackupSchedule :one UPDATE backup_schedules SET enabled = false, updated_at = CURRENT_TIMESTAMP @@ -1084,8 +1162,8 @@ WHERE id = ? RETURNING id, name, description, cron_expression, target_id, retention_days, enabled, created_at, updated_at, last_run_at, next_run_at ` -func (q *Queries) DisableBackupSchedule(ctx context.Context, id int64) (BackupSchedule, error) { - row := q.queryRow(ctx, q.disableBackupScheduleStmt, disableBackupSchedule, id) +func (q *Queries) DisableBackupSchedule(ctx context.Context, id int64) (*BackupSchedule, error) { + row := q.db.QueryRowContext(ctx, DisableBackupSchedule, id) var i BackupSchedule err := row.Scan( &i.ID, @@ -1100,10 +1178,10 @@ func (q *Queries) DisableBackupSchedule(ctx context.Context, id int64) (BackupSc &i.LastRunAt, &i.NextRunAt, ) - return i, err + return &i, err } -const enableBackupSchedule = `-- name: EnableBackupSchedule :one +const EnableBackupSchedule = `-- name: EnableBackupSchedule :one UPDATE backup_schedules SET enabled = true, updated_at = CURRENT_TIMESTAMP @@ -1111,8 +1189,8 @@ WHERE id = ? 
RETURNING id, name, description, cron_expression, target_id, retention_days, enabled, created_at, updated_at, last_run_at, next_run_at ` -func (q *Queries) EnableBackupSchedule(ctx context.Context, id int64) (BackupSchedule, error) { - row := q.queryRow(ctx, q.enableBackupScheduleStmt, enableBackupSchedule, id) +func (q *Queries) EnableBackupSchedule(ctx context.Context, id int64) (*BackupSchedule, error) { + row := q.db.QueryRowContext(ctx, EnableBackupSchedule, id) var i BackupSchedule err := row.Scan( &i.ID, @@ -1127,10 +1205,10 @@ func (q *Queries) EnableBackupSchedule(ctx context.Context, id int64) (BackupSch &i.LastRunAt, &i.NextRunAt, ) - return i, err + return &i, err } -const getAllKeys = `-- name: GetAllKeys :many +const GetAllKeys = `-- name: GetAllKeys :many SELECT k.id, k.name, k.description, k.algorithm, k.key_size, k.curve, k.format, k.public_key, k.private_key, k.certificate, k.status, k.created_at, k.updated_at, k.expires_at, k.last_rotated_at, k.signing_key_id, k.sha256_fingerprint, k.sha1_fingerprint, k.provider_id, k.user_id, k.is_ca, k.ethereum_address, kp.name as provider_name, kp.type as provider_type FROM keys k JOIN key_providers kp ON k.provider_id = kp.id @@ -1138,8 +1216,8 @@ WHERE (? IS NULL OR k.provider_id = ?) 
` type GetAllKeysParams struct { - Column1 interface{} `json:"column_1"` - ProviderID int64 `json:"provider_id"` + Column1 interface{} `json:"column1"` + ProviderID int64 `json:"providerId"` } type GetAllKeysRow struct { @@ -1147,35 +1225,35 @@ type GetAllKeysRow struct { Name string `json:"name"` Description sql.NullString `json:"description"` Algorithm string `json:"algorithm"` - KeySize sql.NullInt64 `json:"key_size"` + KeySize sql.NullInt64 `json:"keySize"` Curve sql.NullString `json:"curve"` Format string `json:"format"` - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` Certificate sql.NullString `json:"certificate"` Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExpiresAt sql.NullTime `json:"expires_at"` - LastRotatedAt sql.NullTime `json:"last_rotated_at"` - SigningKeyID sql.NullInt64 `json:"signing_key_id"` - Sha256Fingerprint string `json:"sha256_fingerprint"` - Sha1Fingerprint string `json:"sha1_fingerprint"` - ProviderID int64 `json:"provider_id"` - UserID int64 `json:"user_id"` - IsCa int64 `json:"is_ca"` - EthereumAddress sql.NullString `json:"ethereum_address"` - ProviderName string `json:"provider_name"` - ProviderType string `json:"provider_type"` -} - -func (q *Queries) GetAllKeys(ctx context.Context, arg GetAllKeysParams) ([]GetAllKeysRow, error) { - rows, err := q.query(ctx, q.getAllKeysStmt, getAllKeys, arg.Column1, arg.ProviderID) + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt sql.NullTime `json:"expiresAt"` + LastRotatedAt sql.NullTime `json:"lastRotatedAt"` + SigningKeyID sql.NullInt64 `json:"signingKeyId"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + IsCa int64 `json:"isCa"` + EthereumAddress 
sql.NullString `json:"ethereumAddress"` + ProviderName string `json:"providerName"` + ProviderType string `json:"providerType"` +} + +func (q *Queries) GetAllKeys(ctx context.Context, arg *GetAllKeysParams) ([]*GetAllKeysRow, error) { + rows, err := q.db.QueryContext(ctx, GetAllKeys, arg.Column1, arg.ProviderID) if err != nil { return nil, err } defer rows.Close() - items := []GetAllKeysRow{} + items := []*GetAllKeysRow{} for rows.Next() { var i GetAllKeysRow if err := rows.Scan( @@ -1206,7 +1284,7 @@ func (q *Queries) GetAllKeys(ctx context.Context, arg GetAllKeysParams) ([]GetAl ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -1217,17 +1295,17 @@ func (q *Queries) GetAllKeys(ctx context.Context, arg GetAllKeysParams) ([]GetAl return items, nil } -const getAllNodes = `-- name: GetAllNodes :many -SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config FROM nodes +const GetAllNodes = `-- name: GetAllNodes :many +SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message FROM nodes ` -func (q *Queries) GetAllNodes(ctx context.Context) ([]Node, error) { - rows, err := q.query(ctx, q.getAllNodesStmt, getAllNodes) +func (q *Queries) GetAllNodes(ctx context.Context) ([]*Node, error) { + rows, err := q.db.QueryContext(ctx, GetAllNodes) if err != nil { return nil, err } defer rows.Close() - items := []Node{} + items := []*Node{} for rows.Next() { var i Node if err := rows.Scan( @@ -1250,10 +1328,11 @@ func (q *Queries) GetAllNodes(ctx context.Context) ([]Node, error) { &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + 
&i.ErrorMessage, ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -1264,13 +1343,13 @@ func (q *Queries) GetAllNodes(ctx context.Context) ([]Node, error) { return items, nil } -const getBackup = `-- name: GetBackup :one +const GetBackup = `-- name: GetBackup :one SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups WHERE id = ? LIMIT 1 ` -func (q *Queries) GetBackup(ctx context.Context, id int64) (Backup, error) { - row := q.queryRow(ctx, q.getBackupStmt, getBackup, id) +func (q *Queries) GetBackup(ctx context.Context, id int64) (*Backup, error) { + row := q.db.QueryRowContext(ctx, GetBackup, id) var i Backup err := row.Scan( &i.ID, @@ -1284,16 +1363,16 @@ func (q *Queries) GetBackup(ctx context.Context, id int64) (Backup, error) { &i.CreatedAt, &i.NotificationSent, ) - return i, err + return &i, err } -const getBackupSchedule = `-- name: GetBackupSchedule :one +const GetBackupSchedule = `-- name: GetBackupSchedule :one SELECT id, name, description, cron_expression, target_id, retention_days, enabled, created_at, updated_at, last_run_at, next_run_at FROM backup_schedules WHERE id = ? 
LIMIT 1 ` -func (q *Queries) GetBackupSchedule(ctx context.Context, id int64) (BackupSchedule, error) { - row := q.queryRow(ctx, q.getBackupScheduleStmt, getBackupSchedule, id) +func (q *Queries) GetBackupSchedule(ctx context.Context, id int64) (*BackupSchedule, error) { + row := q.db.QueryRowContext(ctx, GetBackupSchedule, id) var i BackupSchedule err := row.Scan( &i.ID, @@ -1308,16 +1387,16 @@ func (q *Queries) GetBackupSchedule(ctx context.Context, id int64) (BackupSchedu &i.LastRunAt, &i.NextRunAt, ) - return i, err + return &i, err } -const getBackupTarget = `-- name: GetBackupTarget :one +const GetBackupTarget = `-- name: GetBackupTarget :one SELECT id, name, bucket_name, region, endpoint, bucket_path, access_key_id, secret_key, s3_path_style, restic_password, type, created_at, updated_at FROM backup_targets WHERE id = ? LIMIT 1 ` -func (q *Queries) GetBackupTarget(ctx context.Context, id int64) (BackupTarget, error) { - row := q.queryRow(ctx, q.getBackupTargetStmt, getBackupTarget, id) +func (q *Queries) GetBackupTarget(ctx context.Context, id int64) (*BackupTarget, error) { + row := q.db.QueryRowContext(ctx, GetBackupTarget, id) var i BackupTarget err := row.Scan( &i.ID, @@ -1334,27 +1413,27 @@ func (q *Queries) GetBackupTarget(ctx context.Context, id int64) (BackupTarget, &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const getBackupsByDateRange = `-- name: GetBackupsByDateRange :many +const GetBackupsByDateRange = `-- name: GetBackupsByDateRange :many SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups WHERE created_at BETWEEN ? AND ? 
ORDER BY created_at DESC ` type GetBackupsByDateRangeParams struct { - FromCreatedAt time.Time `json:"from_created_at"` - ToCreatedAt time.Time `json:"to_created_at"` + FromCreatedAt time.Time `json:"fromCreatedAt"` + ToCreatedAt time.Time `json:"toCreatedAt"` } -func (q *Queries) GetBackupsByDateRange(ctx context.Context, arg GetBackupsByDateRangeParams) ([]Backup, error) { - rows, err := q.query(ctx, q.getBackupsByDateRangeStmt, getBackupsByDateRange, arg.FromCreatedAt, arg.ToCreatedAt) +func (q *Queries) GetBackupsByDateRange(ctx context.Context, arg *GetBackupsByDateRangeParams) ([]*Backup, error) { + rows, err := q.db.QueryContext(ctx, GetBackupsByDateRange, arg.FromCreatedAt, arg.ToCreatedAt) if err != nil { return nil, err } defer rows.Close() - items := []Backup{} + items := []*Backup{} for rows.Next() { var i Backup if err := rows.Scan( @@ -1371,7 +1450,7 @@ func (q *Queries) GetBackupsByDateRange(ctx context.Context, arg GetBackupsByDat ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -1382,24 +1461,24 @@ func (q *Queries) GetBackupsByDateRange(ctx context.Context, arg GetBackupsByDat return items, nil } -const getBackupsByScheduleAndStatus = `-- name: GetBackupsByScheduleAndStatus :many +const GetBackupsByScheduleAndStatus = `-- name: GetBackupsByScheduleAndStatus :many SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups WHERE schedule_id = ? AND status = ? 
ORDER BY created_at DESC ` type GetBackupsByScheduleAndStatusParams struct { - ScheduleID sql.NullInt64 `json:"schedule_id"` + ScheduleID sql.NullInt64 `json:"scheduleId"` Status string `json:"status"` } -func (q *Queries) GetBackupsByScheduleAndStatus(ctx context.Context, arg GetBackupsByScheduleAndStatusParams) ([]Backup, error) { - rows, err := q.query(ctx, q.getBackupsByScheduleAndStatusStmt, getBackupsByScheduleAndStatus, arg.ScheduleID, arg.Status) +func (q *Queries) GetBackupsByScheduleAndStatus(ctx context.Context, arg *GetBackupsByScheduleAndStatusParams) ([]*Backup, error) { + rows, err := q.db.QueryContext(ctx, GetBackupsByScheduleAndStatus, arg.ScheduleID, arg.Status) if err != nil { return nil, err } defer rows.Close() - items := []Backup{} + items := []*Backup{} for rows.Next() { var i Backup if err := rows.Scan( @@ -1416,7 +1495,7 @@ func (q *Queries) GetBackupsByScheduleAndStatus(ctx context.Context, arg GetBack ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -1427,19 +1506,19 @@ func (q *Queries) GetBackupsByScheduleAndStatus(ctx context.Context, arg GetBack return items, nil } -const getBackupsByStatus = `-- name: GetBackupsByStatus :many +const GetBackupsByStatus = `-- name: GetBackupsByStatus :many SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups WHERE status = ? 
ORDER BY created_at DESC ` -func (q *Queries) GetBackupsByStatus(ctx context.Context, status string) ([]Backup, error) { - rows, err := q.query(ctx, q.getBackupsByStatusStmt, getBackupsByStatus, status) +func (q *Queries) GetBackupsByStatus(ctx context.Context, status string) ([]*Backup, error) { + rows, err := q.db.QueryContext(ctx, GetBackupsByStatus, status) if err != nil { return nil, err } defer rows.Close() - items := []Backup{} + items := []*Backup{} for rows.Next() { var i Backup if err := rows.Scan( @@ -1456,7 +1535,7 @@ func (q *Queries) GetBackupsByStatus(ctx context.Context, status string) ([]Back ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -1467,14 +1546,14 @@ func (q *Queries) GetBackupsByStatus(ctx context.Context, status string) ([]Back return items, nil } -const getDefaultNotificationProvider = `-- name: GetDefaultNotificationProvider :one +const GetDefaultNotificationProvider = `-- name: GetDefaultNotificationProvider :one SELECT id, name, type, config, is_default, is_enabled, created_at, updated_at, notify_node_downtime, notify_backup_success, notify_backup_failure, notify_s3_connection_issue, last_test_at, last_test_status, last_test_message FROM notification_providers WHERE is_default = 1 AND type = ? 
LIMIT 1 ` -func (q *Queries) GetDefaultNotificationProvider(ctx context.Context, type_ string) (NotificationProvider, error) { - row := q.queryRow(ctx, q.getDefaultNotificationProviderStmt, getDefaultNotificationProvider, type_) +func (q *Queries) GetDefaultNotificationProvider(ctx context.Context, type_ string) (*NotificationProvider, error) { + row := q.db.QueryRowContext(ctx, GetDefaultNotificationProvider, type_) var i NotificationProvider err := row.Scan( &i.ID, @@ -1493,10 +1572,10 @@ func (q *Queries) GetDefaultNotificationProvider(ctx context.Context, type_ stri &i.LastTestStatus, &i.LastTestMessage, ) - return i, err + return &i, err } -const getDefaultNotificationProviderForType = `-- name: GetDefaultNotificationProviderForType :one +const GetDefaultNotificationProviderForType = `-- name: GetDefaultNotificationProviderForType :one SELECT id, name, type, config, is_default, is_enabled, created_at, updated_at, notify_node_downtime, notify_backup_success, notify_backup_failure, notify_s3_connection_issue, last_test_at, last_test_status, last_test_message FROM notification_providers WHERE is_default = true AND ( @@ -1508,8 +1587,8 @@ WHERE is_default = true LIMIT 1 ` -func (q *Queries) GetDefaultNotificationProviderForType(ctx context.Context, notificationType interface{}) (NotificationProvider, error) { - row := q.queryRow(ctx, q.getDefaultNotificationProviderForTypeStmt, getDefaultNotificationProviderForType, notificationType) +func (q *Queries) GetDefaultNotificationProviderForType(ctx context.Context, notificationType interface{}) (*NotificationProvider, error) { + row := q.db.QueryRowContext(ctx, GetDefaultNotificationProviderForType, notificationType) var i NotificationProvider err := row.Scan( &i.ID, @@ -1528,16 +1607,16 @@ func (q *Queries) GetDefaultNotificationProviderForType(ctx context.Context, not &i.LastTestStatus, &i.LastTestMessage, ) - return i, err + return &i, err } -const getFabricOrganization = `-- name: GetFabricOrganization :one -SELECT 
id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at FROM fabric_organizations +const GetFabricOrganization = `-- name: GetFabricOrganization :one +SELECT id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at, crl_key_id, crl_last_update FROM fabric_organizations WHERE id = ? LIMIT 1 ` -func (q *Queries) GetFabricOrganization(ctx context.Context, id int64) (FabricOrganization, error) { - row := q.queryRow(ctx, q.getFabricOrganizationStmt, getFabricOrganization, id) +func (q *Queries) GetFabricOrganization(ctx context.Context, id int64) (*FabricOrganization, error) { + row := q.db.QueryRowContext(ctx, GetFabricOrganization, id) var i FabricOrganization err := row.Scan( &i.ID, @@ -1554,16 +1633,18 @@ func (q *Queries) GetFabricOrganization(ctx context.Context, id int64) (FabricOr &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + &i.CrlKeyID, + &i.CrlLastUpdate, ) - return i, err + return &i, err } -const getFabricOrganizationByID = `-- name: GetFabricOrganizationByID :one -SELECT id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at FROM fabric_organizations WHERE id = ? LIMIT 1 +const GetFabricOrganizationByID = `-- name: GetFabricOrganizationByID :one +SELECT id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at, crl_key_id, crl_last_update FROM fabric_organizations WHERE id = ? 
LIMIT 1 ` -func (q *Queries) GetFabricOrganizationByID(ctx context.Context, id int64) (FabricOrganization, error) { - row := q.queryRow(ctx, q.getFabricOrganizationByIDStmt, getFabricOrganizationByID, id) +func (q *Queries) GetFabricOrganizationByID(ctx context.Context, id int64) (*FabricOrganization, error) { + row := q.db.QueryRowContext(ctx, GetFabricOrganizationByID, id) var i FabricOrganization err := row.Scan( &i.ID, @@ -1580,17 +1661,19 @@ func (q *Queries) GetFabricOrganizationByID(ctx context.Context, id int64) (Fabr &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + &i.CrlKeyID, + &i.CrlLastUpdate, ) - return i, err + return &i, err } -const getFabricOrganizationByMSPID = `-- name: GetFabricOrganizationByMSPID :one -SELECT id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at FROM fabric_organizations +const GetFabricOrganizationByMSPID = `-- name: GetFabricOrganizationByMSPID :one +SELECT id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at, crl_key_id, crl_last_update FROM fabric_organizations WHERE msp_id = ? 
LIMIT 1 ` -func (q *Queries) GetFabricOrganizationByMSPID(ctx context.Context, mspID string) (FabricOrganization, error) { - row := q.queryRow(ctx, q.getFabricOrganizationByMSPIDStmt, getFabricOrganizationByMSPID, mspID) +func (q *Queries) GetFabricOrganizationByMSPID(ctx context.Context, mspID string) (*FabricOrganization, error) { + row := q.db.QueryRowContext(ctx, GetFabricOrganizationByMSPID, mspID) var i FabricOrganization err := row.Scan( &i.ID, @@ -1607,13 +1690,15 @@ func (q *Queries) GetFabricOrganizationByMSPID(ctx context.Context, mspID string &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + &i.CrlKeyID, + &i.CrlLastUpdate, ) - return i, err + return &i, err } -const getFabricOrganizationByMspID = `-- name: GetFabricOrganizationByMspID :one +const GetFabricOrganizationByMspID = `-- name: GetFabricOrganizationByMspID :one SELECT - fo.id, fo.msp_id, fo.description, fo.config, fo.ca_config, fo.sign_key_id, fo.tls_root_key_id, fo.admin_tls_key_id, fo.admin_sign_key_id, fo.client_sign_key_id, fo.provider_id, fo.created_at, fo.created_by, fo.updated_at, + fo.id, fo.msp_id, fo.description, fo.config, fo.ca_config, fo.sign_key_id, fo.tls_root_key_id, fo.admin_tls_key_id, fo.admin_sign_key_id, fo.client_sign_key_id, fo.provider_id, fo.created_at, fo.created_by, fo.updated_at, fo.crl_key_id, fo.crl_last_update, sk.public_key as sign_public_key, sk.certificate as sign_certificate, tk.public_key as tls_public_key, @@ -1628,28 +1713,30 @@ WHERE fo.msp_id = ? 
type GetFabricOrganizationByMspIDRow struct { ID int64 `json:"id"` - MspID string `json:"msp_id"` + MspID string `json:"mspId"` Description sql.NullString `json:"description"` Config sql.NullString `json:"config"` - CaConfig sql.NullString `json:"ca_config"` - SignKeyID sql.NullInt64 `json:"sign_key_id"` - TlsRootKeyID sql.NullInt64 `json:"tls_root_key_id"` - AdminTlsKeyID sql.NullInt64 `json:"admin_tls_key_id"` - AdminSignKeyID sql.NullInt64 `json:"admin_sign_key_id"` - ClientSignKeyID sql.NullInt64 `json:"client_sign_key_id"` - ProviderID sql.NullInt64 `json:"provider_id"` - CreatedAt time.Time `json:"created_at"` - CreatedBy sql.NullInt64 `json:"created_by"` - UpdatedAt sql.NullTime `json:"updated_at"` - SignPublicKey sql.NullString `json:"sign_public_key"` - SignCertificate sql.NullString `json:"sign_certificate"` - TlsPublicKey sql.NullString `json:"tls_public_key"` - TlsCertificate sql.NullString `json:"tls_certificate"` - ProviderName sql.NullString `json:"provider_name"` -} - -func (q *Queries) GetFabricOrganizationByMspID(ctx context.Context, mspID string) (GetFabricOrganizationByMspIDRow, error) { - row := q.queryRow(ctx, q.getFabricOrganizationByMspIDStmt, getFabricOrganizationByMspID, mspID) + CaConfig sql.NullString `json:"caConfig"` + SignKeyID sql.NullInt64 `json:"signKeyId"` + TlsRootKeyID sql.NullInt64 `json:"tlsRootKeyId"` + AdminTlsKeyID sql.NullInt64 `json:"adminTlsKeyId"` + AdminSignKeyID sql.NullInt64 `json:"adminSignKeyId"` + ClientSignKeyID sql.NullInt64 `json:"clientSignKeyId"` + ProviderID sql.NullInt64 `json:"providerId"` + CreatedAt time.Time `json:"createdAt"` + CreatedBy sql.NullInt64 `json:"createdBy"` + UpdatedAt sql.NullTime `json:"updatedAt"` + CrlKeyID sql.NullInt64 `json:"crlKeyId"` + CrlLastUpdate sql.NullTime `json:"crlLastUpdate"` + SignPublicKey sql.NullString `json:"signPublicKey"` + SignCertificate sql.NullString `json:"signCertificate"` + TlsPublicKey sql.NullString `json:"tlsPublicKey"` + TlsCertificate sql.NullString 
`json:"tlsCertificate"` + ProviderName sql.NullString `json:"providerName"` +} + +func (q *Queries) GetFabricOrganizationByMspID(ctx context.Context, mspID string) (*GetFabricOrganizationByMspIDRow, error) { + row := q.db.QueryRowContext(ctx, GetFabricOrganizationByMspID, mspID) var i GetFabricOrganizationByMspIDRow err := row.Scan( &i.ID, @@ -1666,18 +1753,20 @@ func (q *Queries) GetFabricOrganizationByMspID(ctx context.Context, mspID string &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + &i.CrlKeyID, + &i.CrlLastUpdate, &i.SignPublicKey, &i.SignCertificate, &i.TlsPublicKey, &i.TlsCertificate, &i.ProviderName, ) - return i, err + return &i, err } -const getFabricOrganizationWithKeys = `-- name: GetFabricOrganizationWithKeys :one +const GetFabricOrganizationWithKeys = `-- name: GetFabricOrganizationWithKeys :one SELECT - fo.id, fo.msp_id, fo.description, fo.config, fo.ca_config, fo.sign_key_id, fo.tls_root_key_id, fo.admin_tls_key_id, fo.admin_sign_key_id, fo.client_sign_key_id, fo.provider_id, fo.created_at, fo.created_by, fo.updated_at, + fo.id, fo.msp_id, fo.description, fo.config, fo.ca_config, fo.sign_key_id, fo.tls_root_key_id, fo.admin_tls_key_id, fo.admin_sign_key_id, fo.client_sign_key_id, fo.provider_id, fo.created_at, fo.created_by, fo.updated_at, fo.crl_key_id, fo.crl_last_update, sk.public_key as sign_public_key, sk.certificate as sign_certificate, tk.public_key as tls_public_key, @@ -1692,28 +1781,30 @@ WHERE fo.id = ? 
type GetFabricOrganizationWithKeysRow struct { ID int64 `json:"id"` - MspID string `json:"msp_id"` + MspID string `json:"mspId"` Description sql.NullString `json:"description"` Config sql.NullString `json:"config"` - CaConfig sql.NullString `json:"ca_config"` - SignKeyID sql.NullInt64 `json:"sign_key_id"` - TlsRootKeyID sql.NullInt64 `json:"tls_root_key_id"` - AdminTlsKeyID sql.NullInt64 `json:"admin_tls_key_id"` - AdminSignKeyID sql.NullInt64 `json:"admin_sign_key_id"` - ClientSignKeyID sql.NullInt64 `json:"client_sign_key_id"` - ProviderID sql.NullInt64 `json:"provider_id"` - CreatedAt time.Time `json:"created_at"` - CreatedBy sql.NullInt64 `json:"created_by"` - UpdatedAt sql.NullTime `json:"updated_at"` - SignPublicKey sql.NullString `json:"sign_public_key"` - SignCertificate sql.NullString `json:"sign_certificate"` - TlsPublicKey sql.NullString `json:"tls_public_key"` - TlsCertificate sql.NullString `json:"tls_certificate"` - ProviderName sql.NullString `json:"provider_name"` -} - -func (q *Queries) GetFabricOrganizationWithKeys(ctx context.Context, id int64) (GetFabricOrganizationWithKeysRow, error) { - row := q.queryRow(ctx, q.getFabricOrganizationWithKeysStmt, getFabricOrganizationWithKeys, id) + CaConfig sql.NullString `json:"caConfig"` + SignKeyID sql.NullInt64 `json:"signKeyId"` + TlsRootKeyID sql.NullInt64 `json:"tlsRootKeyId"` + AdminTlsKeyID sql.NullInt64 `json:"adminTlsKeyId"` + AdminSignKeyID sql.NullInt64 `json:"adminSignKeyId"` + ClientSignKeyID sql.NullInt64 `json:"clientSignKeyId"` + ProviderID sql.NullInt64 `json:"providerId"` + CreatedAt time.Time `json:"createdAt"` + CreatedBy sql.NullInt64 `json:"createdBy"` + UpdatedAt sql.NullTime `json:"updatedAt"` + CrlKeyID sql.NullInt64 `json:"crlKeyId"` + CrlLastUpdate sql.NullTime `json:"crlLastUpdate"` + SignPublicKey sql.NullString `json:"signPublicKey"` + SignCertificate sql.NullString `json:"signCertificate"` + TlsPublicKey sql.NullString `json:"tlsPublicKey"` + TlsCertificate sql.NullString 
`json:"tlsCertificate"` + ProviderName sql.NullString `json:"providerName"` +} + +func (q *Queries) GetFabricOrganizationWithKeys(ctx context.Context, id int64) (*GetFabricOrganizationWithKeysRow, error) { + row := q.db.QueryRowContext(ctx, GetFabricOrganizationWithKeys, id) var i GetFabricOrganizationWithKeysRow err := row.Scan( &i.ID, @@ -1730,16 +1821,18 @@ func (q *Queries) GetFabricOrganizationWithKeys(ctx context.Context, id int64) ( &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + &i.CrlKeyID, + &i.CrlLastUpdate, &i.SignPublicKey, &i.SignCertificate, &i.TlsPublicKey, &i.TlsCertificate, &i.ProviderName, ) - return i, err + return &i, err } -const getKey = `-- name: GetKey :one +const GetKey = `-- name: GetKey :one SELECT k.id, k.name, k.description, k.algorithm, k.key_size, k.curve, k.format, k.public_key, k.private_key, k.certificate, k.status, k.created_at, k.updated_at, k.expires_at, k.last_rotated_at, k.signing_key_id, k.sha256_fingerprint, k.sha1_fingerprint, k.provider_id, k.user_id, k.is_ca, k.ethereum_address, kp.name as provider_name, kp.type as provider_type FROM keys k JOIN key_providers kp ON k.provider_id = kp.id @@ -1751,30 +1844,30 @@ type GetKeyRow struct { Name string `json:"name"` Description sql.NullString `json:"description"` Algorithm string `json:"algorithm"` - KeySize sql.NullInt64 `json:"key_size"` + KeySize sql.NullInt64 `json:"keySize"` Curve sql.NullString `json:"curve"` Format string `json:"format"` - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` Certificate sql.NullString `json:"certificate"` Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExpiresAt sql.NullTime `json:"expires_at"` - LastRotatedAt sql.NullTime `json:"last_rotated_at"` - SigningKeyID sql.NullInt64 `json:"signing_key_id"` - Sha256Fingerprint string `json:"sha256_fingerprint"` - Sha1Fingerprint 
string `json:"sha1_fingerprint"` - ProviderID int64 `json:"provider_id"` - UserID int64 `json:"user_id"` - IsCa int64 `json:"is_ca"` - EthereumAddress sql.NullString `json:"ethereum_address"` - ProviderName string `json:"provider_name"` - ProviderType string `json:"provider_type"` -} - -func (q *Queries) GetKey(ctx context.Context, id int64) (GetKeyRow, error) { - row := q.queryRow(ctx, q.getKeyStmt, getKey, id) + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt sql.NullTime `json:"expiresAt"` + LastRotatedAt sql.NullTime `json:"lastRotatedAt"` + SigningKeyID sql.NullInt64 `json:"signingKeyId"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + IsCa int64 `json:"isCa"` + EthereumAddress sql.NullString `json:"ethereumAddress"` + ProviderName string `json:"providerName"` + ProviderType string `json:"providerType"` +} + +func (q *Queries) GetKey(ctx context.Context, id int64) (*GetKeyRow, error) { + row := q.db.QueryRowContext(ctx, GetKey, id) var i GetKeyRow err := row.Scan( &i.ID, @@ -1802,10 +1895,10 @@ func (q *Queries) GetKey(ctx context.Context, id int64) (GetKeyRow, error) { &i.ProviderName, &i.ProviderType, ) - return i, err + return &i, err } -const getKeyByEthereumAddress = `-- name: GetKeyByEthereumAddress :one +const GetKeyByEthereumAddress = `-- name: GetKeyByEthereumAddress :one SELECT k.id, k.name, k.description, k.algorithm, k.key_size, k.curve, k.format, k.public_key, k.private_key, k.certificate, k.status, k.created_at, k.updated_at, k.expires_at, k.last_rotated_at, k.signing_key_id, k.sha256_fingerprint, k.sha1_fingerprint, k.provider_id, k.user_id, k.is_ca, k.ethereum_address, kp.name as provider_name, kp.type as provider_type FROM keys k JOIN key_providers kp ON k.provider_id = kp.id @@ -1817,30 +1910,30 @@ type GetKeyByEthereumAddressRow struct { Name string `json:"name"` 
Description sql.NullString `json:"description"` Algorithm string `json:"algorithm"` - KeySize sql.NullInt64 `json:"key_size"` + KeySize sql.NullInt64 `json:"keySize"` Curve sql.NullString `json:"curve"` Format string `json:"format"` - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` Certificate sql.NullString `json:"certificate"` Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExpiresAt sql.NullTime `json:"expires_at"` - LastRotatedAt sql.NullTime `json:"last_rotated_at"` - SigningKeyID sql.NullInt64 `json:"signing_key_id"` - Sha256Fingerprint string `json:"sha256_fingerprint"` - Sha1Fingerprint string `json:"sha1_fingerprint"` - ProviderID int64 `json:"provider_id"` - UserID int64 `json:"user_id"` - IsCa int64 `json:"is_ca"` - EthereumAddress sql.NullString `json:"ethereum_address"` - ProviderName string `json:"provider_name"` - ProviderType string `json:"provider_type"` -} - -func (q *Queries) GetKeyByEthereumAddress(ctx context.Context, ethereumAddress sql.NullString) (GetKeyByEthereumAddressRow, error) { - row := q.queryRow(ctx, q.getKeyByEthereumAddressStmt, getKeyByEthereumAddress, ethereumAddress) + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt sql.NullTime `json:"expiresAt"` + LastRotatedAt sql.NullTime `json:"lastRotatedAt"` + SigningKeyID sql.NullInt64 `json:"signingKeyId"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + IsCa int64 `json:"isCa"` + EthereumAddress sql.NullString `json:"ethereumAddress"` + ProviderName string `json:"providerName"` + ProviderType string `json:"providerType"` +} + +func (q *Queries) GetKeyByEthereumAddress(ctx context.Context, ethereumAddress sql.NullString) 
(*GetKeyByEthereumAddressRow, error) { + row := q.db.QueryRowContext(ctx, GetKeyByEthereumAddress, ethereumAddress) var i GetKeyByEthereumAddressRow err := row.Scan( &i.ID, @@ -1868,10 +1961,10 @@ func (q *Queries) GetKeyByEthereumAddress(ctx context.Context, ethereumAddress s &i.ProviderName, &i.ProviderType, ) - return i, err + return &i, err } -const getKeyByID = `-- name: GetKeyByID :one +const GetKeyByID = `-- name: GetKeyByID :one SELECT k.id, k.name, k.description, k.algorithm, k.key_size, k.curve, k.format, k.public_key, k.private_key, k.certificate, k.status, k.created_at, k.updated_at, k.expires_at, k.last_rotated_at, k.signing_key_id, k.sha256_fingerprint, k.sha1_fingerprint, k.provider_id, k.user_id, k.is_ca, k.ethereum_address, kp.name as provider_name, kp.type as provider_type FROM keys k JOIN key_providers kp ON k.provider_id = kp.id @@ -1883,30 +1976,30 @@ type GetKeyByIDRow struct { Name string `json:"name"` Description sql.NullString `json:"description"` Algorithm string `json:"algorithm"` - KeySize sql.NullInt64 `json:"key_size"` + KeySize sql.NullInt64 `json:"keySize"` Curve sql.NullString `json:"curve"` Format string `json:"format"` - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` Certificate sql.NullString `json:"certificate"` Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExpiresAt sql.NullTime `json:"expires_at"` - LastRotatedAt sql.NullTime `json:"last_rotated_at"` - SigningKeyID sql.NullInt64 `json:"signing_key_id"` - Sha256Fingerprint string `json:"sha256_fingerprint"` - Sha1Fingerprint string `json:"sha1_fingerprint"` - ProviderID int64 `json:"provider_id"` - UserID int64 `json:"user_id"` - IsCa int64 `json:"is_ca"` - EthereumAddress sql.NullString `json:"ethereum_address"` - ProviderName string `json:"provider_name"` - ProviderType string `json:"provider_type"` 
-} - -func (q *Queries) GetKeyByID(ctx context.Context, id int64) (GetKeyByIDRow, error) { - row := q.queryRow(ctx, q.getKeyByIDStmt, getKeyByID, id) + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt sql.NullTime `json:"expiresAt"` + LastRotatedAt sql.NullTime `json:"lastRotatedAt"` + SigningKeyID sql.NullInt64 `json:"signingKeyId"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + IsCa int64 `json:"isCa"` + EthereumAddress sql.NullString `json:"ethereumAddress"` + ProviderName string `json:"providerName"` + ProviderType string `json:"providerType"` +} + +func (q *Queries) GetKeyByID(ctx context.Context, id int64) (*GetKeyByIDRow, error) { + row := q.db.QueryRowContext(ctx, GetKeyByID, id) var i GetKeyByIDRow err := row.Scan( &i.ID, @@ -1934,26 +2027,26 @@ func (q *Queries) GetKeyByID(ctx context.Context, id int64) (GetKeyByIDRow, erro &i.ProviderName, &i.ProviderType, ) - return i, err + return &i, err } -const getKeyCountByProvider = `-- name: GetKeyCountByProvider :one +const GetKeyCountByProvider = `-- name: GetKeyCountByProvider :one SELECT COUNT(*) FROM keys WHERE provider_id = ? ` func (q *Queries) GetKeyCountByProvider(ctx context.Context, providerID int64) (int64, error) { - row := q.queryRow(ctx, q.getKeyCountByProviderStmt, getKeyCountByProvider, providerID) + row := q.db.QueryRowContext(ctx, GetKeyCountByProvider, providerID) var count int64 err := row.Scan(&count) return count, err } -const getKeyProvider = `-- name: GetKeyProvider :one +const GetKeyProvider = `-- name: GetKeyProvider :one SELECT id, name, type, is_default, config, created_at, updated_at FROM key_providers WHERE id = ? 
` -func (q *Queries) GetKeyProvider(ctx context.Context, id int64) (KeyProvider, error) { - row := q.queryRow(ctx, q.getKeyProviderStmt, getKeyProvider, id) +func (q *Queries) GetKeyProvider(ctx context.Context, id int64) (*KeyProvider, error) { + row := q.db.QueryRowContext(ctx, GetKeyProvider, id) var i KeyProvider err := row.Scan( &i.ID, @@ -1964,15 +2057,15 @@ func (q *Queries) GetKeyProvider(ctx context.Context, id int64) (KeyProvider, er &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const getKeyProviderByDefault = `-- name: GetKeyProviderByDefault :one +const GetKeyProviderByDefault = `-- name: GetKeyProviderByDefault :one SELECT id, name, type, is_default, config, created_at, updated_at FROM key_providers WHERE is_default = 1 LIMIT 1 ` -func (q *Queries) GetKeyProviderByDefault(ctx context.Context) (KeyProvider, error) { - row := q.queryRow(ctx, q.getKeyProviderByDefaultStmt, getKeyProviderByDefault) +func (q *Queries) GetKeyProviderByDefault(ctx context.Context) (*KeyProvider, error) { + row := q.db.QueryRowContext(ctx, GetKeyProviderByDefault) var i KeyProvider err := row.Scan( &i.ID, @@ -1983,15 +2076,15 @@ func (q *Queries) GetKeyProviderByDefault(ctx context.Context) (KeyProvider, err &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const getKeyProviderByID = `-- name: GetKeyProviderByID :one +const GetKeyProviderByID = `-- name: GetKeyProviderByID :one SELECT id, name, type, is_default, config, created_at, updated_at FROM key_providers WHERE id = ? 
` -func (q *Queries) GetKeyProviderByID(ctx context.Context, id int64) (KeyProvider, error) { - row := q.queryRow(ctx, q.getKeyProviderByIDStmt, getKeyProviderByID, id) +func (q *Queries) GetKeyProviderByID(ctx context.Context, id int64) (*KeyProvider, error) { + row := q.db.QueryRowContext(ctx, GetKeyProviderByID, id) var i KeyProvider err := row.Scan( &i.ID, @@ -2002,77 +2095,70 @@ func (q *Queries) GetKeyProviderByID(ctx context.Context, id int64) (KeyProvider &i.CreatedAt, &i.UpdatedAt, ) - return i, err -} - -const getKeysByAlgorithm = `-- name: GetKeysByAlgorithm :many -SELECT id, name, description, algorithm, key_size, curve, format, public_key, private_key, certificate, status, created_at, updated_at, expires_at, last_rotated_at, signing_key_id, sha256_fingerprint, sha1_fingerprint, provider_id, user_id, is_ca, ethereum_address FROM keys WHERE algorithm = ? -` - -func (q *Queries) GetKeysByAlgorithm(ctx context.Context, algorithm string) ([]Key, error) { - rows, err := q.query(ctx, q.getKeysByAlgorithmStmt, getKeysByAlgorithm, algorithm) - if err != nil { - return nil, err - } - defer rows.Close() - items := []Key{} - for rows.Next() { - var i Key - if err := rows.Scan( - &i.ID, - &i.Name, - &i.Description, - &i.Algorithm, - &i.KeySize, - &i.Curve, - &i.Format, - &i.PublicKey, - &i.PrivateKey, - &i.Certificate, - &i.Status, - &i.CreatedAt, - &i.UpdatedAt, - &i.ExpiresAt, - &i.LastRotatedAt, - &i.SigningKeyID, - &i.Sha256Fingerprint, - &i.Sha1Fingerprint, - &i.ProviderID, - &i.UserID, - &i.IsCa, - &i.EthereumAddress, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Close(); err != nil { - return nil, err - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil + return &i, err } -const getKeysByProviderAndCurve = `-- name: GetKeysByProviderAndCurve :many -SELECT id, name, description, algorithm, key_size, curve, format, public_key, private_key, certificate, status, created_at, updated_at, 
expires_at, last_rotated_at, signing_key_id, sha256_fingerprint, sha1_fingerprint, provider_id, user_id, is_ca, ethereum_address FROM keys WHERE provider_id = ? AND curve = ? +const GetKeysByFilter = `-- name: GetKeysByFilter :many +SELECT k.id, k.name, k.description, k.algorithm, k.key_size, k.curve, k.format, k.public_key, k.private_key, k.certificate, k.status, k.created_at, k.updated_at, k.expires_at, k.last_rotated_at, k.signing_key_id, k.sha256_fingerprint, k.sha1_fingerprint, k.provider_id, k.user_id, k.is_ca, k.ethereum_address, kp.name as provider_name, kp.type as provider_type +FROM keys k +JOIN key_providers kp ON k.provider_id = kp.id +WHERE (?1 = '' OR k.algorithm = ?2) + AND (?3 = 0 OR k.provider_id = ?4) + AND (?5 = '' OR k.curve = ?6) ` -type GetKeysByProviderAndCurveParams struct { - ProviderID int64 `json:"provider_id"` - Curve sql.NullString `json:"curve"` +type GetKeysByFilterParams struct { + AlgorithmFilter interface{} `json:"algorithmFilter"` + Algorithm string `json:"algorithm"` + ProviderIDFilter interface{} `json:"providerIdFilter"` + ProviderID int64 `json:"providerId"` + CurveFilter interface{} `json:"curveFilter"` + Curve sql.NullString `json:"curve"` } -func (q *Queries) GetKeysByProviderAndCurve(ctx context.Context, arg GetKeysByProviderAndCurveParams) ([]Key, error) { - rows, err := q.query(ctx, q.getKeysByProviderAndCurveStmt, getKeysByProviderAndCurve, arg.ProviderID, arg.Curve) +type GetKeysByFilterRow struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description sql.NullString `json:"description"` + Algorithm string `json:"algorithm"` + KeySize sql.NullInt64 `json:"keySize"` + Curve sql.NullString `json:"curve"` + Format string `json:"format"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` + Certificate sql.NullString `json:"certificate"` + Status string `json:"status"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt sql.NullTime 
`json:"expiresAt"` + LastRotatedAt sql.NullTime `json:"lastRotatedAt"` + SigningKeyID sql.NullInt64 `json:"signingKeyId"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + IsCa int64 `json:"isCa"` + EthereumAddress sql.NullString `json:"ethereumAddress"` + ProviderName string `json:"providerName"` + ProviderType string `json:"providerType"` +} + +func (q *Queries) GetKeysByFilter(ctx context.Context, arg *GetKeysByFilterParams) ([]*GetKeysByFilterRow, error) { + rows, err := q.db.QueryContext(ctx, GetKeysByFilter, + arg.AlgorithmFilter, + arg.Algorithm, + arg.ProviderIDFilter, + arg.ProviderID, + arg.CurveFilter, + arg.Curve, + ) if err != nil { return nil, err } defer rows.Close() - items := []Key{} + items := []*GetKeysByFilterRow{} for rows.Next() { - var i Key + var i GetKeysByFilterRow if err := rows.Scan( &i.ID, &i.Name, @@ -2096,10 +2182,12 @@ func (q *Queries) GetKeysByProviderAndCurve(ctx context.Context, arg GetKeysByPr &i.UserID, &i.IsCa, &i.EthereumAddress, + &i.ProviderName, + &i.ProviderType, ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2110,26 +2198,26 @@ func (q *Queries) GetKeysByProviderAndCurve(ctx context.Context, arg GetKeysByPr return items, nil } -const getKeysCount = `-- name: GetKeysCount :one +const GetKeysCount = `-- name: GetKeysCount :one SELECT COUNT(*) FROM keys ` func (q *Queries) GetKeysCount(ctx context.Context) (int64, error) { - row := q.queryRow(ctx, q.getKeysCountStmt, getKeysCount) + row := q.db.QueryRowContext(ctx, GetKeysCount) var count int64 err := row.Scan(&count) return count, err } -const getLatestNodeEvent = `-- name: GetLatestNodeEvent :one +const GetLatestNodeEvent = `-- name: GetLatestNodeEvent :one SELECT id, node_id, event_type, description, data, status, created_at FROM node_events WHERE 
node_id = ? ORDER BY created_at DESC LIMIT 1 ` -func (q *Queries) GetLatestNodeEvent(ctx context.Context, nodeID int64) (NodeEvent, error) { - row := q.queryRow(ctx, q.getLatestNodeEventStmt, getLatestNodeEvent, nodeID) +func (q *Queries) GetLatestNodeEvent(ctx context.Context, nodeID int64) (*NodeEvent, error) { + row := q.db.QueryRowContext(ctx, GetLatestNodeEvent, nodeID) var i NodeEvent err := row.Scan( &i.ID, @@ -2140,16 +2228,16 @@ func (q *Queries) GetLatestNodeEvent(ctx context.Context, nodeID int64) (NodeEve &i.Status, &i.CreatedAt, ) - return i, err + return &i, err } -const getNetwork = `-- name: GetNetwork :one +const GetNetwork = `-- name: GetNetwork :one SELECT id, name, network_id, platform, status, description, config, deployment_config, exposed_ports, domain, created_at, created_by, updated_at, genesis_block_b64, current_config_block_b64 FROM networks WHERE id = ? LIMIT 1 ` -func (q *Queries) GetNetwork(ctx context.Context, id int64) (Network, error) { - row := q.queryRow(ctx, q.getNetworkStmt, getNetwork, id) +func (q *Queries) GetNetwork(ctx context.Context, id int64) (*Network, error) { + row := q.db.QueryRowContext(ctx, GetNetwork, id) var i Network err := row.Scan( &i.ID, @@ -2168,16 +2256,16 @@ func (q *Queries) GetNetwork(ctx context.Context, id int64) (Network, error) { &i.GenesisBlockB64, &i.CurrentConfigBlockB64, ) - return i, err + return &i, err } -const getNetworkByName = `-- name: GetNetworkByName :one +const GetNetworkByName = `-- name: GetNetworkByName :one SELECT id, name, network_id, platform, status, description, config, deployment_config, exposed_ports, domain, created_at, created_by, updated_at, genesis_block_b64, current_config_block_b64 FROM networks WHERE name = ? 
LIMIT 1 ` -func (q *Queries) GetNetworkByName(ctx context.Context, name string) (Network, error) { - row := q.queryRow(ctx, q.getNetworkByNameStmt, getNetworkByName, name) +func (q *Queries) GetNetworkByName(ctx context.Context, name string) (*Network, error) { + row := q.db.QueryRowContext(ctx, GetNetworkByName, name) var i Network err := row.Scan( &i.ID, @@ -2196,16 +2284,16 @@ func (q *Queries) GetNetworkByName(ctx context.Context, name string) (Network, e &i.GenesisBlockB64, &i.CurrentConfigBlockB64, ) - return i, err + return &i, err } -const getNetworkByNetworkId = `-- name: GetNetworkByNetworkId :one +const GetNetworkByNetworkId = `-- name: GetNetworkByNetworkId :one SELECT id, name, network_id, platform, status, description, config, deployment_config, exposed_ports, domain, created_at, created_by, updated_at, genesis_block_b64, current_config_block_b64 FROM networks WHERE network_id = ? LIMIT 1 ` -func (q *Queries) GetNetworkByNetworkId(ctx context.Context, networkID sql.NullString) (Network, error) { - row := q.queryRow(ctx, q.getNetworkByNetworkIdStmt, getNetworkByNetworkId, networkID) +func (q *Queries) GetNetworkByNetworkId(ctx context.Context, networkID sql.NullString) (*Network, error) { + row := q.db.QueryRowContext(ctx, GetNetworkByNetworkId, networkID) var i Network err := row.Scan( &i.ID, @@ -2224,33 +2312,33 @@ func (q *Queries) GetNetworkByNetworkId(ctx context.Context, networkID sql.NullS &i.GenesisBlockB64, &i.CurrentConfigBlockB64, ) - return i, err + return &i, err } -const getNetworkCurrentConfigBlock = `-- name: GetNetworkCurrentConfigBlock :one +const GetNetworkCurrentConfigBlock = `-- name: GetNetworkCurrentConfigBlock :one SELECT current_config_block_b64 FROM networks WHERE id = ? 
` func (q *Queries) GetNetworkCurrentConfigBlock(ctx context.Context, id int64) (sql.NullString, error) { - row := q.queryRow(ctx, q.getNetworkCurrentConfigBlockStmt, getNetworkCurrentConfigBlock, id) + row := q.db.QueryRowContext(ctx, GetNetworkCurrentConfigBlock, id) var current_config_block_b64 sql.NullString err := row.Scan(&current_config_block_b64) return current_config_block_b64, err } -const getNetworkNode = `-- name: GetNetworkNode :one +const GetNetworkNode = `-- name: GetNetworkNode :one SELECT id, network_id, node_id, role, status, config, created_at, updated_at FROM network_nodes WHERE network_id = ? AND node_id = ? ` type GetNetworkNodeParams struct { - NetworkID int64 `json:"network_id"` - NodeID int64 `json:"node_id"` + NetworkID int64 `json:"networkId"` + NodeID int64 `json:"nodeId"` } -func (q *Queries) GetNetworkNode(ctx context.Context, arg GetNetworkNodeParams) (NetworkNode, error) { - row := q.queryRow(ctx, q.getNetworkNodeStmt, getNetworkNode, arg.NetworkID, arg.NodeID) +func (q *Queries) GetNetworkNode(ctx context.Context, arg *GetNetworkNodeParams) (*NetworkNode, error) { + row := q.db.QueryRowContext(ctx, GetNetworkNode, arg.NetworkID, arg.NodeID) var i NetworkNode err := row.Scan( &i.ID, @@ -2262,11 +2350,11 @@ func (q *Queries) GetNetworkNode(ctx context.Context, arg GetNetworkNodeParams) &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const getNetworkNodes = `-- name: GetNetworkNodes :many -SELECT nn.id, nn.network_id, nn.node_id, nn.role, nn.status, nn.config, nn.created_at, nn.updated_at, n.id, n.name, n.slug, n.platform, n.status, n.description, n.network_id, n.config, n.resources, n.endpoint, n.public_endpoint, n.p2p_address, n.created_at, n.created_by, n.updated_at, n.fabric_organization_id, n.node_type, n.node_config, n.deployment_config +const GetNetworkNodes = `-- name: GetNetworkNodes :many +SELECT nn.id, nn.network_id, nn.node_id, nn.role, nn.status, nn.config, nn.created_at, nn.updated_at, n.id, n.name, n.slug, 
n.platform, n.status, n.description, n.network_id, n.config, n.resources, n.endpoint, n.public_endpoint, n.p2p_address, n.created_at, n.created_by, n.updated_at, n.fabric_organization_id, n.node_type, n.node_config, n.deployment_config, n.error_message FROM network_nodes nn JOIN nodes n ON nn.node_id = n.id WHERE nn.network_id = ? @@ -2275,41 +2363,42 @@ ORDER BY nn.created_at DESC type GetNetworkNodesRow struct { ID int64 `json:"id"` - NetworkID int64 `json:"network_id"` - NodeID int64 `json:"node_id"` + NetworkID int64 `json:"networkId"` + NodeID int64 `json:"nodeId"` Role string `json:"role"` Status string `json:"status"` Config sql.NullString `json:"config"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ID_2 int64 `json:"id_2"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ID_2 int64 `json:"id2"` Name string `json:"name"` Slug string `json:"slug"` Platform string `json:"platform"` - Status_2 string `json:"status_2"` + Status_2 string `json:"status2"` Description sql.NullString `json:"description"` - NetworkID_2 sql.NullInt64 `json:"network_id_2"` - Config_2 sql.NullString `json:"config_2"` + NetworkID_2 sql.NullInt64 `json:"networkId2"` + Config_2 sql.NullString `json:"config2"` Resources sql.NullString `json:"resources"` Endpoint sql.NullString `json:"endpoint"` - PublicEndpoint sql.NullString `json:"public_endpoint"` - P2pAddress sql.NullString `json:"p2p_address"` - CreatedAt_2 time.Time `json:"created_at_2"` - CreatedBy sql.NullInt64 `json:"created_by"` - UpdatedAt_2 sql.NullTime `json:"updated_at_2"` - FabricOrganizationID sql.NullInt64 `json:"fabric_organization_id"` - NodeType sql.NullString `json:"node_type"` - NodeConfig sql.NullString `json:"node_config"` - DeploymentConfig sql.NullString `json:"deployment_config"` -} - -func (q *Queries) GetNetworkNodes(ctx context.Context, networkID int64) ([]GetNetworkNodesRow, error) { - rows, err := q.query(ctx, q.getNetworkNodesStmt, 
getNetworkNodes, networkID) + PublicEndpoint sql.NullString `json:"publicEndpoint"` + P2pAddress sql.NullString `json:"p2pAddress"` + CreatedAt_2 time.Time `json:"createdAt2"` + CreatedBy sql.NullInt64 `json:"createdBy"` + UpdatedAt_2 sql.NullTime `json:"updatedAt2"` + FabricOrganizationID sql.NullInt64 `json:"fabricOrganizationId"` + NodeType sql.NullString `json:"nodeType"` + NodeConfig sql.NullString `json:"nodeConfig"` + DeploymentConfig sql.NullString `json:"deploymentConfig"` + ErrorMessage sql.NullString `json:"errorMessage"` +} + +func (q *Queries) GetNetworkNodes(ctx context.Context, networkID int64) ([]*GetNetworkNodesRow, error) { + rows, err := q.db.QueryContext(ctx, GetNetworkNodes, networkID) if err != nil { return nil, err } defer rows.Close() - items := []GetNetworkNodesRow{} + items := []*GetNetworkNodesRow{} for rows.Next() { var i GetNetworkNodesRow if err := rows.Scan( @@ -2340,10 +2429,11 @@ func (q *Queries) GetNetworkNodes(ctx context.Context, networkID int64) ([]GetNe &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2354,13 +2444,13 @@ func (q *Queries) GetNetworkNodes(ctx context.Context, networkID int64) ([]GetNe return items, nil } -const getNode = `-- name: GetNode :one -SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config FROM nodes +const GetNode = `-- name: GetNode :one +SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message FROM nodes WHERE id = ? 
LIMIT 1 ` -func (q *Queries) GetNode(ctx context.Context, id int64) (Node, error) { - row := q.queryRow(ctx, q.getNodeStmt, getNode, id) +func (q *Queries) GetNode(ctx context.Context, id int64) (*Node, error) { + row := q.db.QueryRowContext(ctx, GetNode, id) var i Node err := row.Scan( &i.ID, @@ -2382,16 +2472,17 @@ func (q *Queries) GetNode(ctx context.Context, id int64) (Node, error) { &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ) - return i, err + return &i, err } -const getNodeBySlug = `-- name: GetNodeBySlug :one -SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config FROM nodes WHERE slug = ? +const GetNodeBySlug = `-- name: GetNodeBySlug :one +SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message FROM nodes WHERE slug = ? ` -func (q *Queries) GetNodeBySlug(ctx context.Context, slug string) (Node, error) { - row := q.queryRow(ctx, q.getNodeBySlugStmt, getNodeBySlug, slug) +func (q *Queries) GetNodeBySlug(ctx context.Context, slug string) (*Node, error) { + row := q.db.QueryRowContext(ctx, GetNodeBySlug, slug) var i Node err := row.Scan( &i.ID, @@ -2413,17 +2504,18 @@ func (q *Queries) GetNodeBySlug(ctx context.Context, slug string) (Node, error) &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ) - return i, err + return &i, err } -const getNodeEvent = `-- name: GetNodeEvent :one +const GetNodeEvent = `-- name: GetNodeEvent :one SELECT id, node_id, event_type, description, data, status, created_at FROM node_events WHERE id = ? 
LIMIT 1 ` -func (q *Queries) GetNodeEvent(ctx context.Context, id int64) (NodeEvent, error) { - row := q.queryRow(ctx, q.getNodeEventStmt, getNodeEvent, id) +func (q *Queries) GetNodeEvent(ctx context.Context, id int64) (*NodeEvent, error) { + row := q.db.QueryRowContext(ctx, GetNodeEvent, id) var i NodeEvent err := row.Scan( &i.ID, @@ -2434,16 +2526,16 @@ func (q *Queries) GetNodeEvent(ctx context.Context, id int64) (NodeEvent, error) &i.Status, &i.CreatedAt, ) - return i, err + return &i, err } -const getNotificationProvider = `-- name: GetNotificationProvider :one +const GetNotificationProvider = `-- name: GetNotificationProvider :one SELECT id, name, type, config, is_default, is_enabled, created_at, updated_at, notify_node_downtime, notify_backup_success, notify_backup_failure, notify_s3_connection_issue, last_test_at, last_test_status, last_test_message FROM notification_providers WHERE id = ? LIMIT 1 ` -func (q *Queries) GetNotificationProvider(ctx context.Context, id int64) (NotificationProvider, error) { - row := q.queryRow(ctx, q.getNotificationProviderStmt, getNotificationProvider, id) +func (q *Queries) GetNotificationProvider(ctx context.Context, id int64) (*NotificationProvider, error) { + row := q.db.QueryRowContext(ctx, GetNotificationProvider, id) var i NotificationProvider err := row.Scan( &i.ID, @@ -2462,18 +2554,18 @@ func (q *Queries) GetNotificationProvider(ctx context.Context, id int64) (Notifi &i.LastTestStatus, &i.LastTestMessage, ) - return i, err + return &i, err } -const getOldestBackupByTarget = `-- name: GetOldestBackupByTarget :one +const GetOldestBackupByTarget = `-- name: GetOldestBackupByTarget :one SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups WHERE target_id = ? 
ORDER BY created_at ASC LIMIT 1 ` -func (q *Queries) GetOldestBackupByTarget(ctx context.Context, targetID int64) (Backup, error) { - row := q.queryRow(ctx, q.getOldestBackupByTargetStmt, getOldestBackupByTarget, targetID) +func (q *Queries) GetOldestBackupByTarget(ctx context.Context, targetID int64) (*Backup, error) { + row := q.db.QueryRowContext(ctx, GetOldestBackupByTarget, targetID) var i Backup err := row.Scan( &i.ID, @@ -2487,10 +2579,10 @@ func (q *Queries) GetOldestBackupByTarget(ctx context.Context, targetID int64) ( &i.CreatedAt, &i.NotificationSent, ) - return i, err + return &i, err } -const getOrdererPorts = `-- name: GetOrdererPorts :many +const GetOrdererPorts = `-- name: GetOrdererPorts :many SELECT endpoint, public_endpoint FROM nodes WHERE node_type = 'fabric-orderer' @@ -2499,22 +2591,22 @@ AND (endpoint IS NOT NULL OR public_endpoint IS NOT NULL) type GetOrdererPortsRow struct { Endpoint sql.NullString `json:"endpoint"` - PublicEndpoint sql.NullString `json:"public_endpoint"` + PublicEndpoint sql.NullString `json:"publicEndpoint"` } -func (q *Queries) GetOrdererPorts(ctx context.Context) ([]GetOrdererPortsRow, error) { - rows, err := q.query(ctx, q.getOrdererPortsStmt, getOrdererPorts) +func (q *Queries) GetOrdererPorts(ctx context.Context) ([]*GetOrdererPortsRow, error) { + rows, err := q.db.QueryContext(ctx, GetOrdererPorts) if err != nil { return nil, err } defer rows.Close() - items := []GetOrdererPortsRow{} + items := []*GetOrdererPortsRow{} for rows.Next() { var i GetOrdererPortsRow if err := rows.Scan(&i.Endpoint, &i.PublicEndpoint); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2525,7 +2617,25 @@ func (q *Queries) GetOrdererPorts(ctx context.Context) ([]GetOrdererPortsRow, er return items, nil } -const getPeerPorts = `-- name: GetPeerPorts :many +const GetOrganizationCRLInfo = `-- name: GetOrganizationCRLInfo :one +SELECT crl_key_id, 
crl_last_update +FROM fabric_organizations +WHERE id = ? +` + +type GetOrganizationCRLInfoRow struct { + CrlKeyID sql.NullInt64 `json:"crlKeyId"` + CrlLastUpdate sql.NullTime `json:"crlLastUpdate"` +} + +func (q *Queries) GetOrganizationCRLInfo(ctx context.Context, id int64) (*GetOrganizationCRLInfoRow, error) { + row := q.db.QueryRowContext(ctx, GetOrganizationCRLInfo, id) + var i GetOrganizationCRLInfoRow + err := row.Scan(&i.CrlKeyID, &i.CrlLastUpdate) + return &i, err +} + +const GetPeerPorts = `-- name: GetPeerPorts :many SELECT endpoint, public_endpoint FROM nodes WHERE node_type = 'fabric-peer' @@ -2534,22 +2644,22 @@ AND (endpoint IS NOT NULL OR public_endpoint IS NOT NULL) type GetPeerPortsRow struct { Endpoint sql.NullString `json:"endpoint"` - PublicEndpoint sql.NullString `json:"public_endpoint"` + PublicEndpoint sql.NullString `json:"publicEndpoint"` } -func (q *Queries) GetPeerPorts(ctx context.Context) ([]GetPeerPortsRow, error) { - rows, err := q.query(ctx, q.getPeerPortsStmt, getPeerPorts) +func (q *Queries) GetPeerPorts(ctx context.Context) ([]*GetPeerPortsRow, error) { + rows, err := q.db.QueryContext(ctx, GetPeerPorts) if err != nil { return nil, err } defer rows.Close() - items := []GetPeerPortsRow{} + items := []*GetPeerPortsRow{} for rows.Next() { var i GetPeerPortsRow if err := rows.Scan(&i.Endpoint, &i.PublicEndpoint); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2560,7 +2670,7 @@ func (q *Queries) GetPeerPorts(ctx context.Context) ([]GetPeerPortsRow, error) { return items, nil } -const getProvidersByNotificationType = `-- name: GetProvidersByNotificationType :many +const GetProvidersByNotificationType = `-- name: GetProvidersByNotificationType :many SELECT id, name, type, config, is_default, is_enabled, created_at, updated_at, notify_node_downtime, notify_backup_success, notify_backup_failure, notify_s3_connection_issue, last_test_at, 
last_test_status, last_test_message FROM notification_providers WHERE ( (? = 'NODE_DOWNTIME' AND notify_node_downtime = 1) OR @@ -2572,14 +2682,14 @@ ORDER BY created_at DESC ` type GetProvidersByNotificationTypeParams struct { - Column1 interface{} `json:"column_1"` - Column2 interface{} `json:"column_2"` - Column3 interface{} `json:"column_3"` - Column4 interface{} `json:"column_4"` + Column1 interface{} `json:"column1"` + Column2 interface{} `json:"column2"` + Column3 interface{} `json:"column3"` + Column4 interface{} `json:"column4"` } -func (q *Queries) GetProvidersByNotificationType(ctx context.Context, arg GetProvidersByNotificationTypeParams) ([]NotificationProvider, error) { - rows, err := q.query(ctx, q.getProvidersByNotificationTypeStmt, getProvidersByNotificationType, +func (q *Queries) GetProvidersByNotificationType(ctx context.Context, arg *GetProvidersByNotificationTypeParams) ([]*NotificationProvider, error) { + rows, err := q.db.QueryContext(ctx, GetProvidersByNotificationType, arg.Column1, arg.Column2, arg.Column3, @@ -2589,7 +2699,7 @@ func (q *Queries) GetProvidersByNotificationType(ctx context.Context, arg GetPro return nil, err } defer rows.Close() - items := []NotificationProvider{} + items := []*NotificationProvider{} for rows.Next() { var i NotificationProvider if err := rows.Scan( @@ -2611,7 +2721,7 @@ func (q *Queries) GetProvidersByNotificationType(ctx context.Context, arg GetPro ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2622,7 +2732,7 @@ func (q *Queries) GetProvidersByNotificationType(ctx context.Context, arg GetPro return items, nil } -const getRecentCompletedBackups = `-- name: GetRecentCompletedBackups :many +const GetRecentCompletedBackups = `-- name: GetRecentCompletedBackups :many SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups WHERE 
(status = 'COMPLETED' OR status = 'FAILED') AND notification_sent = false @@ -2630,13 +2740,13 @@ ORDER BY completed_at DESC LIMIT 50 ` -func (q *Queries) GetRecentCompletedBackups(ctx context.Context) ([]Backup, error) { - rows, err := q.query(ctx, q.getRecentCompletedBackupsStmt, getRecentCompletedBackups) +func (q *Queries) GetRecentCompletedBackups(ctx context.Context) ([]*Backup, error) { + rows, err := q.db.QueryContext(ctx, GetRecentCompletedBackups) if err != nil { return nil, err } defer rows.Close() - items := []Backup{} + items := []*Backup{} for rows.Next() { var i Backup if err := rows.Scan( @@ -2653,7 +2763,7 @@ func (q *Queries) GetRecentCompletedBackups(ctx context.Context) ([]Backup, erro ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2664,7 +2774,83 @@ func (q *Queries) GetRecentCompletedBackups(ctx context.Context) ([]Backup, erro return items, nil } -const getSession = `-- name: GetSession :one +const GetRevokedCertificate = `-- name: GetRevokedCertificate :one +SELECT id, fabric_organization_id, serial_number, revocation_time, reason, issuer_certificate_id, created_at, updated_at FROM fabric_revoked_certificates +WHERE fabric_organization_id = ? AND serial_number = ? 
+` + +type GetRevokedCertificateParams struct { + FabricOrganizationID int64 `json:"fabricOrganizationId"` + SerialNumber string `json:"serialNumber"` +} + +func (q *Queries) GetRevokedCertificate(ctx context.Context, arg *GetRevokedCertificateParams) (*FabricRevokedCertificate, error) { + row := q.db.QueryRowContext(ctx, GetRevokedCertificate, arg.FabricOrganizationID, arg.SerialNumber) + var i FabricRevokedCertificate + err := row.Scan( + &i.ID, + &i.FabricOrganizationID, + &i.SerialNumber, + &i.RevocationTime, + &i.Reason, + &i.IssuerCertificateID, + &i.CreatedAt, + &i.UpdatedAt, + ) + return &i, err +} + +const GetRevokedCertificateCount = `-- name: GetRevokedCertificateCount :one +SELECT COUNT(*) FROM fabric_revoked_certificates +WHERE fabric_organization_id = ? +` + +func (q *Queries) GetRevokedCertificateCount(ctx context.Context, fabricOrganizationID int64) (int64, error) { + row := q.db.QueryRowContext(ctx, GetRevokedCertificateCount, fabricOrganizationID) + var count int64 + err := row.Scan(&count) + return count, err +} + +const GetRevokedCertificates = `-- name: GetRevokedCertificates :many +SELECT id, fabric_organization_id, serial_number, revocation_time, reason, issuer_certificate_id, created_at, updated_at FROM fabric_revoked_certificates +WHERE fabric_organization_id = ? 
+ORDER BY revocation_time DESC +` + +func (q *Queries) GetRevokedCertificates(ctx context.Context, fabricOrganizationID int64) ([]*FabricRevokedCertificate, error) { + rows, err := q.db.QueryContext(ctx, GetRevokedCertificates, fabricOrganizationID) + if err != nil { + return nil, err + } + defer rows.Close() + items := []*FabricRevokedCertificate{} + for rows.Next() { + var i FabricRevokedCertificate + if err := rows.Scan( + &i.ID, + &i.FabricOrganizationID, + &i.SerialNumber, + &i.RevocationTime, + &i.Reason, + &i.IssuerCertificateID, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetSession = `-- name: GetSession :one SELECT s.id, s.session_id, s.token, s.expires_at, s.created_at, u.username FROM sessions s JOIN users u ON s.user_id = u.id @@ -2673,15 +2859,15 @@ WHERE s.session_id = ? 
AND s.expires_at > CURRENT_TIMESTAMP type GetSessionRow struct { ID int64 `json:"id"` - SessionID string `json:"session_id"` + SessionID string `json:"sessionId"` Token string `json:"token"` - ExpiresAt time.Time `json:"expires_at"` - CreatedAt time.Time `json:"created_at"` + ExpiresAt time.Time `json:"expiresAt"` + CreatedAt time.Time `json:"createdAt"` Username string `json:"username"` } -func (q *Queries) GetSession(ctx context.Context, sessionID string) (GetSessionRow, error) { - row := q.queryRow(ctx, q.getSessionStmt, getSession, sessionID) +func (q *Queries) GetSession(ctx context.Context, sessionID string) (*GetSessionRow, error) { + row := q.db.QueryRowContext(ctx, GetSession, sessionID) var i GetSessionRow err := row.Scan( &i.ID, @@ -2691,16 +2877,33 @@ func (q *Queries) GetSession(ctx context.Context, sessionID string) (GetSessionR &i.CreatedAt, &i.Username, ) - return i, err + return &i, err } -const getUser = `-- name: GetUser :one +const GetSetting = `-- name: GetSetting :one +SELECT id, config, created_at, updated_at FROM settings +WHERE id = ? LIMIT 1 +` + +func (q *Queries) GetSetting(ctx context.Context, id int64) (*Setting, error) { + row := q.db.QueryRowContext(ctx, GetSetting, id) + var i Setting + err := row.Scan( + &i.ID, + &i.Config, + &i.CreatedAt, + &i.UpdatedAt, + ) + return &i, err +} + +const GetUser = `-- name: GetUser :one SELECT id, username, password, name, email, role, provider, provider_id, avatar_url, created_at, last_login_at, updated_at FROM users WHERE id = ? 
LIMIT 1 ` -func (q *Queries) GetUser(ctx context.Context, id int64) (User, error) { - row := q.queryRow(ctx, q.getUserStmt, getUser, id) +func (q *Queries) GetUser(ctx context.Context, id int64) (*User, error) { + row := q.db.QueryRowContext(ctx, GetUser, id) var i User err := row.Scan( &i.ID, @@ -2716,16 +2919,16 @@ func (q *Queries) GetUser(ctx context.Context, id int64) (User, error) { &i.LastLoginAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const getUserByUsername = `-- name: GetUserByUsername :one +const GetUserByUsername = `-- name: GetUserByUsername :one SELECT id, username, password, name, email, role, provider, provider_id, avatar_url, created_at, last_login_at, updated_at FROM users WHERE username = ? LIMIT 1 ` -func (q *Queries) GetUserByUsername(ctx context.Context, username string) (User, error) { - row := q.queryRow(ctx, q.getUserByUsernameStmt, getUserByUsername, username) +func (q *Queries) GetUserByUsername(ctx context.Context, username string) (*User, error) { + row := q.db.QueryRowContext(ctx, GetUserByUsername, username) var i User err := row.Scan( &i.ID, @@ -2741,21 +2944,21 @@ func (q *Queries) GetUserByUsername(ctx context.Context, username string) (User, &i.LastLoginAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const listBackupSchedules = `-- name: ListBackupSchedules :many +const ListBackupSchedules = `-- name: ListBackupSchedules :many SELECT id, name, description, cron_expression, target_id, retention_days, enabled, created_at, updated_at, last_run_at, next_run_at FROM backup_schedules ORDER BY created_at DESC ` -func (q *Queries) ListBackupSchedules(ctx context.Context) ([]BackupSchedule, error) { - rows, err := q.query(ctx, q.listBackupSchedulesStmt, listBackupSchedules) +func (q *Queries) ListBackupSchedules(ctx context.Context) ([]*BackupSchedule, error) { + rows, err := q.db.QueryContext(ctx, ListBackupSchedules) if err != nil { return nil, err } defer rows.Close() - items := []BackupSchedule{} + items := 
[]*BackupSchedule{} for rows.Next() { var i BackupSchedule if err := rows.Scan( @@ -2773,7 +2976,7 @@ func (q *Queries) ListBackupSchedules(ctx context.Context) ([]BackupSchedule, er ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2784,18 +2987,18 @@ func (q *Queries) ListBackupSchedules(ctx context.Context) ([]BackupSchedule, er return items, nil } -const listBackupTargets = `-- name: ListBackupTargets :many +const ListBackupTargets = `-- name: ListBackupTargets :many SELECT id, name, bucket_name, region, endpoint, bucket_path, access_key_id, secret_key, s3_path_style, restic_password, type, created_at, updated_at FROM backup_targets ORDER BY created_at DESC ` -func (q *Queries) ListBackupTargets(ctx context.Context) ([]BackupTarget, error) { - rows, err := q.query(ctx, q.listBackupTargetsStmt, listBackupTargets) +func (q *Queries) ListBackupTargets(ctx context.Context) ([]*BackupTarget, error) { + rows, err := q.db.QueryContext(ctx, ListBackupTargets) if err != nil { return nil, err } defer rows.Close() - items := []BackupTarget{} + items := []*BackupTarget{} for rows.Next() { var i BackupTarget if err := rows.Scan( @@ -2815,7 +3018,7 @@ func (q *Queries) ListBackupTargets(ctx context.Context) ([]BackupTarget, error) ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2826,7 +3029,7 @@ func (q *Queries) ListBackupTargets(ctx context.Context) ([]BackupTarget, error) return items, nil } -const listBackups = `-- name: ListBackups :many +const ListBackups = `-- name: ListBackups :many SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups ORDER BY created_at DESC LIMIT ? OFFSET ? 
@@ -2837,13 +3040,13 @@ type ListBackupsParams struct { Offset int64 `json:"offset"` } -func (q *Queries) ListBackups(ctx context.Context, arg ListBackupsParams) ([]Backup, error) { - rows, err := q.query(ctx, q.listBackupsStmt, listBackups, arg.Limit, arg.Offset) +func (q *Queries) ListBackups(ctx context.Context, arg *ListBackupsParams) ([]*Backup, error) { + rows, err := q.db.QueryContext(ctx, ListBackups, arg.Limit, arg.Offset) if err != nil { return nil, err } defer rows.Close() - items := []Backup{} + items := []*Backup{} for rows.Next() { var i Backup if err := rows.Scan( @@ -2860,7 +3063,7 @@ func (q *Queries) ListBackups(ctx context.Context, arg ListBackupsParams) ([]Bac ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2871,19 +3074,19 @@ func (q *Queries) ListBackups(ctx context.Context, arg ListBackupsParams) ([]Bac return items, nil } -const listBackupsBySchedule = `-- name: ListBackupsBySchedule :many +const ListBackupsBySchedule = `-- name: ListBackupsBySchedule :many SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups WHERE schedule_id = ? 
ORDER BY created_at DESC ` -func (q *Queries) ListBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) ([]Backup, error) { - rows, err := q.query(ctx, q.listBackupsByScheduleStmt, listBackupsBySchedule, scheduleID) +func (q *Queries) ListBackupsBySchedule(ctx context.Context, scheduleID sql.NullInt64) ([]*Backup, error) { + rows, err := q.db.QueryContext(ctx, ListBackupsBySchedule, scheduleID) if err != nil { return nil, err } defer rows.Close() - items := []Backup{} + items := []*Backup{} for rows.Next() { var i Backup if err := rows.Scan( @@ -2900,7 +3103,7 @@ func (q *Queries) ListBackupsBySchedule(ctx context.Context, scheduleID sql.Null ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2911,19 +3114,19 @@ func (q *Queries) ListBackupsBySchedule(ctx context.Context, scheduleID sql.Null return items, nil } -const listBackupsByTarget = `-- name: ListBackupsByTarget :many +const ListBackupsByTarget = `-- name: ListBackupsByTarget :many SELECT id, schedule_id, target_id, status, size_bytes, started_at, completed_at, error_message, created_at, notification_sent FROM backups WHERE target_id = ? 
ORDER BY created_at DESC ` -func (q *Queries) ListBackupsByTarget(ctx context.Context, targetID int64) ([]Backup, error) { - rows, err := q.query(ctx, q.listBackupsByTargetStmt, listBackupsByTarget, targetID) +func (q *Queries) ListBackupsByTarget(ctx context.Context, targetID int64) ([]*Backup, error) { + rows, err := q.db.QueryContext(ctx, ListBackupsByTarget, targetID) if err != nil { return nil, err } defer rows.Close() - items := []Backup{} + items := []*Backup{} for rows.Next() { var i Backup if err := rows.Scan( @@ -2940,7 +3143,7 @@ func (q *Queries) ListBackupsByTarget(ctx context.Context, targetID int64) ([]Ba ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2951,18 +3154,18 @@ func (q *Queries) ListBackupsByTarget(ctx context.Context, targetID int64) ([]Ba return items, nil } -const listFabricOrganizations = `-- name: ListFabricOrganizations :many -SELECT id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at FROM fabric_organizations +const ListFabricOrganizations = `-- name: ListFabricOrganizations :many +SELECT id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at, crl_key_id, crl_last_update FROM fabric_organizations ORDER BY created_at DESC ` -func (q *Queries) ListFabricOrganizations(ctx context.Context) ([]FabricOrganization, error) { - rows, err := q.query(ctx, q.listFabricOrganizationsStmt, listFabricOrganizations) +func (q *Queries) ListFabricOrganizations(ctx context.Context) ([]*FabricOrganization, error) { + rows, err := q.db.QueryContext(ctx, ListFabricOrganizations) if err != nil { return nil, err } defer rows.Close() - items := []FabricOrganization{} + items := []*FabricOrganization{} for 
rows.Next() { var i FabricOrganization if err := rows.Scan( @@ -2980,10 +3183,12 @@ func (q *Queries) ListFabricOrganizations(ctx context.Context) ([]FabricOrganiza &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + &i.CrlKeyID, + &i.CrlLastUpdate, ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -2994,9 +3199,9 @@ func (q *Queries) ListFabricOrganizations(ctx context.Context) ([]FabricOrganiza return items, nil } -const listFabricOrganizationsWithKeys = `-- name: ListFabricOrganizationsWithKeys :many +const ListFabricOrganizationsWithKeys = `-- name: ListFabricOrganizationsWithKeys :many SELECT - fo.id, fo.msp_id, fo.description, fo.config, fo.ca_config, fo.sign_key_id, fo.tls_root_key_id, fo.admin_tls_key_id, fo.admin_sign_key_id, fo.client_sign_key_id, fo.provider_id, fo.created_at, fo.created_by, fo.updated_at, + fo.id, fo.msp_id, fo.description, fo.config, fo.ca_config, fo.sign_key_id, fo.tls_root_key_id, fo.admin_tls_key_id, fo.admin_sign_key_id, fo.client_sign_key_id, fo.provider_id, fo.created_at, fo.created_by, fo.updated_at, fo.crl_key_id, fo.crl_last_update, sk.public_key as sign_public_key, sk.certificate as sign_certificate, tk.public_key as tls_public_key, @@ -3011,33 +3216,35 @@ ORDER BY fo.created_at DESC type ListFabricOrganizationsWithKeysRow struct { ID int64 `json:"id"` - MspID string `json:"msp_id"` + MspID string `json:"mspId"` Description sql.NullString `json:"description"` Config sql.NullString `json:"config"` - CaConfig sql.NullString `json:"ca_config"` - SignKeyID sql.NullInt64 `json:"sign_key_id"` - TlsRootKeyID sql.NullInt64 `json:"tls_root_key_id"` - AdminTlsKeyID sql.NullInt64 `json:"admin_tls_key_id"` - AdminSignKeyID sql.NullInt64 `json:"admin_sign_key_id"` - ClientSignKeyID sql.NullInt64 `json:"client_sign_key_id"` - ProviderID sql.NullInt64 `json:"provider_id"` - CreatedAt time.Time `json:"created_at"` - CreatedBy sql.NullInt64 
`json:"created_by"` - UpdatedAt sql.NullTime `json:"updated_at"` - SignPublicKey sql.NullString `json:"sign_public_key"` - SignCertificate sql.NullString `json:"sign_certificate"` - TlsPublicKey sql.NullString `json:"tls_public_key"` - TlsCertificate sql.NullString `json:"tls_certificate"` - ProviderName sql.NullString `json:"provider_name"` -} - -func (q *Queries) ListFabricOrganizationsWithKeys(ctx context.Context) ([]ListFabricOrganizationsWithKeysRow, error) { - rows, err := q.query(ctx, q.listFabricOrganizationsWithKeysStmt, listFabricOrganizationsWithKeys) + CaConfig sql.NullString `json:"caConfig"` + SignKeyID sql.NullInt64 `json:"signKeyId"` + TlsRootKeyID sql.NullInt64 `json:"tlsRootKeyId"` + AdminTlsKeyID sql.NullInt64 `json:"adminTlsKeyId"` + AdminSignKeyID sql.NullInt64 `json:"adminSignKeyId"` + ClientSignKeyID sql.NullInt64 `json:"clientSignKeyId"` + ProviderID sql.NullInt64 `json:"providerId"` + CreatedAt time.Time `json:"createdAt"` + CreatedBy sql.NullInt64 `json:"createdBy"` + UpdatedAt sql.NullTime `json:"updatedAt"` + CrlKeyID sql.NullInt64 `json:"crlKeyId"` + CrlLastUpdate sql.NullTime `json:"crlLastUpdate"` + SignPublicKey sql.NullString `json:"signPublicKey"` + SignCertificate sql.NullString `json:"signCertificate"` + TlsPublicKey sql.NullString `json:"tlsPublicKey"` + TlsCertificate sql.NullString `json:"tlsCertificate"` + ProviderName sql.NullString `json:"providerName"` +} + +func (q *Queries) ListFabricOrganizationsWithKeys(ctx context.Context) ([]*ListFabricOrganizationsWithKeysRow, error) { + rows, err := q.db.QueryContext(ctx, ListFabricOrganizationsWithKeys) if err != nil { return nil, err } defer rows.Close() - items := []ListFabricOrganizationsWithKeysRow{} + items := []*ListFabricOrganizationsWithKeysRow{} for rows.Next() { var i ListFabricOrganizationsWithKeysRow if err := rows.Scan( @@ -3055,6 +3262,8 @@ func (q *Queries) ListFabricOrganizationsWithKeys(ctx context.Context) ([]ListFa &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + 
&i.CrlKeyID, + &i.CrlLastUpdate, &i.SignPublicKey, &i.SignCertificate, &i.TlsPublicKey, @@ -3063,7 +3272,7 @@ func (q *Queries) ListFabricOrganizationsWithKeys(ctx context.Context) ([]ListFa ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3074,17 +3283,17 @@ func (q *Queries) ListFabricOrganizationsWithKeys(ctx context.Context) ([]ListFa return items, nil } -const listKeyProviders = `-- name: ListKeyProviders :many +const ListKeyProviders = `-- name: ListKeyProviders :many SELECT id, name, type, is_default, config, created_at, updated_at FROM key_providers ` -func (q *Queries) ListKeyProviders(ctx context.Context) ([]KeyProvider, error) { - rows, err := q.query(ctx, q.listKeyProvidersStmt, listKeyProviders) +func (q *Queries) ListKeyProviders(ctx context.Context) ([]*KeyProvider, error) { + rows, err := q.db.QueryContext(ctx, ListKeyProviders) if err != nil { return nil, err } defer rows.Close() - items := []KeyProvider{} + items := []*KeyProvider{} for rows.Next() { var i KeyProvider if err := rows.Scan( @@ -3098,7 +3307,7 @@ func (q *Queries) ListKeyProviders(ctx context.Context) ([]KeyProvider, error) { ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3109,7 +3318,7 @@ func (q *Queries) ListKeyProviders(ctx context.Context) ([]KeyProvider, error) { return items, nil } -const listKeys = `-- name: ListKeys :many +const ListKeys = `-- name: ListKeys :many SELECT k.id, k.name, k.description, k.algorithm, k.key_size, k.curve, k.format, k.public_key, k.private_key, k.certificate, k.status, k.created_at, k.updated_at, k.expires_at, k.last_rotated_at, k.signing_key_id, k.sha256_fingerprint, k.sha1_fingerprint, k.provider_id, k.user_id, k.is_ca, k.ethereum_address, kp.name as provider_name, kp.type as provider_type FROM keys k JOIN key_providers kp ON k.provider_id = kp.id @@ 
-3127,35 +3336,35 @@ type ListKeysRow struct { Name string `json:"name"` Description sql.NullString `json:"description"` Algorithm string `json:"algorithm"` - KeySize sql.NullInt64 `json:"key_size"` + KeySize sql.NullInt64 `json:"keySize"` Curve sql.NullString `json:"curve"` Format string `json:"format"` - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` Certificate sql.NullString `json:"certificate"` Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExpiresAt sql.NullTime `json:"expires_at"` - LastRotatedAt sql.NullTime `json:"last_rotated_at"` - SigningKeyID sql.NullInt64 `json:"signing_key_id"` - Sha256Fingerprint string `json:"sha256_fingerprint"` - Sha1Fingerprint string `json:"sha1_fingerprint"` - ProviderID int64 `json:"provider_id"` - UserID int64 `json:"user_id"` - IsCa int64 `json:"is_ca"` - EthereumAddress sql.NullString `json:"ethereum_address"` - ProviderName string `json:"provider_name"` - ProviderType string `json:"provider_type"` -} - -func (q *Queries) ListKeys(ctx context.Context, arg ListKeysParams) ([]ListKeysRow, error) { - rows, err := q.query(ctx, q.listKeysStmt, listKeys, arg.Limit, arg.Offset) + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + ExpiresAt sql.NullTime `json:"expiresAt"` + LastRotatedAt sql.NullTime `json:"lastRotatedAt"` + SigningKeyID sql.NullInt64 `json:"signingKeyId"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + IsCa int64 `json:"isCa"` + EthereumAddress sql.NullString `json:"ethereumAddress"` + ProviderName string `json:"providerName"` + ProviderType string `json:"providerType"` +} + +func (q *Queries) ListKeys(ctx context.Context, arg *ListKeysParams) ([]*ListKeysRow, error) { + rows, err 
:= q.db.QueryContext(ctx, ListKeys, arg.Limit, arg.Offset) if err != nil { return nil, err } defer rows.Close() - items := []ListKeysRow{} + items := []*ListKeysRow{} for rows.Next() { var i ListKeysRow if err := rows.Scan( @@ -3186,7 +3395,7 @@ func (q *Queries) ListKeys(ctx context.Context, arg ListKeysParams) ([]ListKeysR ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3197,19 +3406,19 @@ func (q *Queries) ListKeys(ctx context.Context, arg ListKeysParams) ([]ListKeysR return items, nil } -const listNetworkNodesByNetwork = `-- name: ListNetworkNodesByNetwork :many +const ListNetworkNodesByNetwork = `-- name: ListNetworkNodesByNetwork :many SELECT id, network_id, node_id, role, status, config, created_at, updated_at FROM network_nodes WHERE network_id = ? ORDER BY created_at DESC ` -func (q *Queries) ListNetworkNodesByNetwork(ctx context.Context, networkID int64) ([]NetworkNode, error) { - rows, err := q.query(ctx, q.listNetworkNodesByNetworkStmt, listNetworkNodesByNetwork, networkID) +func (q *Queries) ListNetworkNodesByNetwork(ctx context.Context, networkID int64) ([]*NetworkNode, error) { + rows, err := q.db.QueryContext(ctx, ListNetworkNodesByNetwork, networkID) if err != nil { return nil, err } defer rows.Close() - items := []NetworkNode{} + items := []*NetworkNode{} for rows.Next() { var i NetworkNode if err := rows.Scan( @@ -3224,7 +3433,7 @@ func (q *Queries) ListNetworkNodesByNetwork(ctx context.Context, networkID int64 ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3235,19 +3444,19 @@ func (q *Queries) ListNetworkNodesByNetwork(ctx context.Context, networkID int64 return items, nil } -const listNetworkNodesByNode = `-- name: ListNetworkNodesByNode :many +const ListNetworkNodesByNode = `-- name: ListNetworkNodesByNode :many SELECT id, network_id, node_id, 
role, status, config, created_at, updated_at FROM network_nodes WHERE node_id = ? ORDER BY created_at DESC ` -func (q *Queries) ListNetworkNodesByNode(ctx context.Context, nodeID int64) ([]NetworkNode, error) { - rows, err := q.query(ctx, q.listNetworkNodesByNodeStmt, listNetworkNodesByNode, nodeID) +func (q *Queries) ListNetworkNodesByNode(ctx context.Context, nodeID int64) ([]*NetworkNode, error) { + rows, err := q.db.QueryContext(ctx, ListNetworkNodesByNode, nodeID) if err != nil { return nil, err } defer rows.Close() - items := []NetworkNode{} + items := []*NetworkNode{} for rows.Next() { var i NetworkNode if err := rows.Scan( @@ -3262,7 +3471,7 @@ func (q *Queries) ListNetworkNodesByNode(ctx context.Context, nodeID int64) ([]N ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3273,18 +3482,18 @@ func (q *Queries) ListNetworkNodesByNode(ctx context.Context, nodeID int64) ([]N return items, nil } -const listNetworks = `-- name: ListNetworks :many +const ListNetworks = `-- name: ListNetworks :many SELECT id, name, network_id, platform, status, description, config, deployment_config, exposed_ports, domain, created_at, created_by, updated_at, genesis_block_b64, current_config_block_b64 FROM networks ORDER BY created_at DESC ` -func (q *Queries) ListNetworks(ctx context.Context) ([]Network, error) { - rows, err := q.query(ctx, q.listNetworksStmt, listNetworks) +func (q *Queries) ListNetworks(ctx context.Context) ([]*Network, error) { + rows, err := q.db.QueryContext(ctx, ListNetworks) if err != nil { return nil, err } defer rows.Close() - items := []Network{} + items := []*Network{} for rows.Next() { var i Network if err := rows.Scan( @@ -3306,7 +3515,7 @@ func (q *Queries) ListNetworks(ctx context.Context) ([]Network, error) { ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err 
@@ -3317,7 +3526,7 @@ func (q *Queries) ListNetworks(ctx context.Context) ([]Network, error) { return items, nil } -const listNodeEvents = `-- name: ListNodeEvents :many +const ListNodeEvents = `-- name: ListNodeEvents :many SELECT id, node_id, event_type, description, data, status, created_at FROM node_events WHERE node_id = ? ORDER BY created_at DESC @@ -3325,18 +3534,18 @@ LIMIT ? OFFSET ? ` type ListNodeEventsParams struct { - NodeID int64 `json:"node_id"` + NodeID int64 `json:"nodeId"` Limit int64 `json:"limit"` Offset int64 `json:"offset"` } -func (q *Queries) ListNodeEvents(ctx context.Context, arg ListNodeEventsParams) ([]NodeEvent, error) { - rows, err := q.query(ctx, q.listNodeEventsStmt, listNodeEvents, arg.NodeID, arg.Limit, arg.Offset) +func (q *Queries) ListNodeEvents(ctx context.Context, arg *ListNodeEventsParams) ([]*NodeEvent, error) { + rows, err := q.db.QueryContext(ctx, ListNodeEvents, arg.NodeID, arg.Limit, arg.Offset) if err != nil { return nil, err } defer rows.Close() - items := []NodeEvent{} + items := []*NodeEvent{} for rows.Next() { var i NodeEvent if err := rows.Scan( @@ -3350,7 +3559,7 @@ func (q *Queries) ListNodeEvents(ctx context.Context, arg ListNodeEventsParams) ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3361,7 +3570,7 @@ func (q *Queries) ListNodeEvents(ctx context.Context, arg ListNodeEventsParams) return items, nil } -const listNodeEventsByType = `-- name: ListNodeEventsByType :many +const ListNodeEventsByType = `-- name: ListNodeEventsByType :many SELECT id, node_id, event_type, description, data, status, created_at FROM node_events WHERE node_id = ? AND event_type = ? ORDER BY created_at DESC @@ -3369,14 +3578,14 @@ LIMIT ? OFFSET ? 
` type ListNodeEventsByTypeParams struct { - NodeID int64 `json:"node_id"` - EventType string `json:"event_type"` + NodeID int64 `json:"nodeId"` + EventType string `json:"eventType"` Limit int64 `json:"limit"` Offset int64 `json:"offset"` } -func (q *Queries) ListNodeEventsByType(ctx context.Context, arg ListNodeEventsByTypeParams) ([]NodeEvent, error) { - rows, err := q.query(ctx, q.listNodeEventsByTypeStmt, listNodeEventsByType, +func (q *Queries) ListNodeEventsByType(ctx context.Context, arg *ListNodeEventsByTypeParams) ([]*NodeEvent, error) { + rows, err := q.db.QueryContext(ctx, ListNodeEventsByType, arg.NodeID, arg.EventType, arg.Limit, @@ -3386,7 +3595,7 @@ func (q *Queries) ListNodeEventsByType(ctx context.Context, arg ListNodeEventsBy return nil, err } defer rows.Close() - items := []NodeEvent{} + items := []*NodeEvent{} for rows.Next() { var i NodeEvent if err := rows.Scan( @@ -3400,7 +3609,7 @@ func (q *Queries) ListNodeEventsByType(ctx context.Context, arg ListNodeEventsBy ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3411,8 +3620,8 @@ func (q *Queries) ListNodeEventsByType(ctx context.Context, arg ListNodeEventsBy return items, nil } -const listNodes = `-- name: ListNodes :many -SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config FROM nodes +const ListNodes = `-- name: ListNodes :many +SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message FROM nodes ORDER BY created_at DESC LIMIT ? OFFSET ? 
` @@ -3422,13 +3631,13 @@ type ListNodesParams struct { Offset int64 `json:"offset"` } -func (q *Queries) ListNodes(ctx context.Context, arg ListNodesParams) ([]Node, error) { - rows, err := q.query(ctx, q.listNodesStmt, listNodes, arg.Limit, arg.Offset) +func (q *Queries) ListNodes(ctx context.Context, arg *ListNodesParams) ([]*Node, error) { + rows, err := q.db.QueryContext(ctx, ListNodes, arg.Limit, arg.Offset) if err != nil { return nil, err } defer rows.Close() - items := []Node{} + items := []*Node{} for rows.Next() { var i Node if err := rows.Scan( @@ -3451,10 +3660,11 @@ func (q *Queries) ListNodes(ctx context.Context, arg ListNodesParams) ([]Node, e &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3465,26 +3675,26 @@ func (q *Queries) ListNodes(ctx context.Context, arg ListNodesParams) ([]Node, e return items, nil } -const listNodesByNetwork = `-- name: ListNodesByNetwork :many -SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config FROM nodes +const ListNodesByNetwork = `-- name: ListNodesByNetwork :many +SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message FROM nodes WHERE network_id = ? ORDER BY created_at DESC LIMIT ? OFFSET ? 
` type ListNodesByNetworkParams struct { - NetworkID sql.NullInt64 `json:"network_id"` + NetworkID sql.NullInt64 `json:"networkId"` Limit int64 `json:"limit"` Offset int64 `json:"offset"` } -func (q *Queries) ListNodesByNetwork(ctx context.Context, arg ListNodesByNetworkParams) ([]Node, error) { - rows, err := q.query(ctx, q.listNodesByNetworkStmt, listNodesByNetwork, arg.NetworkID, arg.Limit, arg.Offset) +func (q *Queries) ListNodesByNetwork(ctx context.Context, arg *ListNodesByNetworkParams) ([]*Node, error) { + rows, err := q.db.QueryContext(ctx, ListNodesByNetwork, arg.NetworkID, arg.Limit, arg.Offset) if err != nil { return nil, err } defer rows.Close() - items := []Node{} + items := []*Node{} for rows.Next() { var i Node if err := rows.Scan( @@ -3507,10 +3717,11 @@ func (q *Queries) ListNodesByNetwork(ctx context.Context, arg ListNodesByNetwork &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3521,8 +3732,8 @@ func (q *Queries) ListNodesByNetwork(ctx context.Context, arg ListNodesByNetwork return items, nil } -const listNodesByPlatform = `-- name: ListNodesByPlatform :many -SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config FROM nodes +const ListNodesByPlatform = `-- name: ListNodesByPlatform :many +SELECT id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message FROM nodes WHERE platform = ? ORDER BY created_at DESC LIMIT ? OFFSET ? 
@@ -3534,13 +3745,13 @@ type ListNodesByPlatformParams struct { Offset int64 `json:"offset"` } -func (q *Queries) ListNodesByPlatform(ctx context.Context, arg ListNodesByPlatformParams) ([]Node, error) { - rows, err := q.query(ctx, q.listNodesByPlatformStmt, listNodesByPlatform, arg.Platform, arg.Limit, arg.Offset) +func (q *Queries) ListNodesByPlatform(ctx context.Context, arg *ListNodesByPlatformParams) ([]*Node, error) { + rows, err := q.db.QueryContext(ctx, ListNodesByPlatform, arg.Platform, arg.Limit, arg.Offset) if err != nil { return nil, err } defer rows.Close() - items := []Node{} + items := []*Node{} for rows.Next() { var i Node if err := rows.Scan( @@ -3563,10 +3774,11 @@ func (q *Queries) ListNodesByPlatform(ctx context.Context, arg ListNodesByPlatfo &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3577,18 +3789,18 @@ func (q *Queries) ListNodesByPlatform(ctx context.Context, arg ListNodesByPlatfo return items, nil } -const listNotificationProviders = `-- name: ListNotificationProviders :many +const ListNotificationProviders = `-- name: ListNotificationProviders :many SELECT id, name, type, config, is_default, is_enabled, created_at, updated_at, notify_node_downtime, notify_backup_success, notify_backup_failure, notify_s3_connection_issue, last_test_at, last_test_status, last_test_message FROM notification_providers ORDER BY created_at DESC ` -func (q *Queries) ListNotificationProviders(ctx context.Context) ([]NotificationProvider, error) { - rows, err := q.query(ctx, q.listNotificationProvidersStmt, listNotificationProviders) +func (q *Queries) ListNotificationProviders(ctx context.Context) ([]*NotificationProvider, error) { + rows, err := q.db.QueryContext(ctx, ListNotificationProviders) if err != nil { return nil, err } defer rows.Close() - items := []NotificationProvider{} + items := 
[]*NotificationProvider{} for rows.Next() { var i NotificationProvider if err := rows.Scan( @@ -3610,7 +3822,7 @@ func (q *Queries) ListNotificationProviders(ctx context.Context) ([]Notification ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { return nil, err @@ -3621,18 +3833,51 @@ func (q *Queries) ListNotificationProviders(ctx context.Context) ([]Notification return items, nil } -const listUsers = `-- name: ListUsers :many +const ListSettings = `-- name: ListSettings :many +SELECT id, config, created_at, updated_at FROM settings +ORDER BY created_at DESC +` + +func (q *Queries) ListSettings(ctx context.Context) ([]*Setting, error) { + rows, err := q.db.QueryContext(ctx, ListSettings) + if err != nil { + return nil, err + } + defer rows.Close() + items := []*Setting{} + for rows.Next() { + var i Setting + if err := rows.Scan( + &i.ID, + &i.Config, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const ListUsers = `-- name: ListUsers :many SELECT id, username, password, name, email, role, provider, provider_id, avatar_url, created_at, last_login_at, updated_at FROM users ORDER BY created_at DESC ` -func (q *Queries) ListUsers(ctx context.Context) ([]User, error) { - rows, err := q.query(ctx, q.listUsersStmt, listUsers) +func (q *Queries) ListUsers(ctx context.Context) ([]*User, error) { + rows, err := q.db.QueryContext(ctx, ListUsers) if err != nil { return nil, err } defer rows.Close() - items := []User{} + items := []*User{} for rows.Next() { var i User if err := rows.Scan( @@ -3651,7 +3896,7 @@ func (q *Queries) ListUsers(ctx context.Context) ([]User, error) { ); err != nil { return nil, err } - items = append(items, i) + items = append(items, &i) } if err := rows.Close(); err != nil { 
return nil, err @@ -3662,18 +3907,18 @@ func (q *Queries) ListUsers(ctx context.Context) ([]User, error) { return items, nil } -const markBackupNotified = `-- name: MarkBackupNotified :exec +const MarkBackupNotified = `-- name: MarkBackupNotified :exec UPDATE backups SET notification_sent = true WHERE id = ? ` func (q *Queries) MarkBackupNotified(ctx context.Context, id int64) error { - _, err := q.exec(ctx, q.markBackupNotifiedStmt, markBackupNotified, id) + _, err := q.db.ExecContext(ctx, MarkBackupNotified, id) return err } -const unsetDefaultNotificationProvider = `-- name: UnsetDefaultNotificationProvider :exec +const UnsetDefaultNotificationProvider = `-- name: UnsetDefaultNotificationProvider :exec UPDATE notification_providers SET is_default = 0, updated_at = CURRENT_TIMESTAMP @@ -3681,20 +3926,20 @@ WHERE type = ? AND is_default = 1 ` func (q *Queries) UnsetDefaultNotificationProvider(ctx context.Context, type_ string) error { - _, err := q.exec(ctx, q.unsetDefaultNotificationProviderStmt, unsetDefaultNotificationProvider, type_) + _, err := q.db.ExecContext(ctx, UnsetDefaultNotificationProvider, type_) return err } -const unsetDefaultProvider = `-- name: UnsetDefaultProvider :exec +const UnsetDefaultProvider = `-- name: UnsetDefaultProvider :exec UPDATE key_providers SET is_default = 0 WHERE is_default = 1 ` func (q *Queries) UnsetDefaultProvider(ctx context.Context) error { - _, err := q.exec(ctx, q.unsetDefaultProviderStmt, unsetDefaultProvider) + _, err := q.db.ExecContext(ctx, UnsetDefaultProvider) return err } -const updateBackupCompleted = `-- name: UpdateBackupCompleted :one +const UpdateBackupCompleted = `-- name: UpdateBackupCompleted :one UPDATE backups SET status = ?, completed_at = ? 
@@ -3704,12 +3949,12 @@ RETURNING id, schedule_id, target_id, status, size_bytes, started_at, completed_ type UpdateBackupCompletedParams struct { Status string `json:"status"` - CompletedAt sql.NullTime `json:"completed_at"` + CompletedAt sql.NullTime `json:"completedAt"` ID int64 `json:"id"` } -func (q *Queries) UpdateBackupCompleted(ctx context.Context, arg UpdateBackupCompletedParams) (Backup, error) { - row := q.queryRow(ctx, q.updateBackupCompletedStmt, updateBackupCompleted, arg.Status, arg.CompletedAt, arg.ID) +func (q *Queries) UpdateBackupCompleted(ctx context.Context, arg *UpdateBackupCompletedParams) (*Backup, error) { + row := q.db.QueryRowContext(ctx, UpdateBackupCompleted, arg.Status, arg.CompletedAt, arg.ID) var i Backup err := row.Scan( &i.ID, @@ -3723,10 +3968,10 @@ func (q *Queries) UpdateBackupCompleted(ctx context.Context, arg UpdateBackupCom &i.CreatedAt, &i.NotificationSent, ) - return i, err + return &i, err } -const updateBackupFailed = `-- name: UpdateBackupFailed :one +const UpdateBackupFailed = `-- name: UpdateBackupFailed :one UPDATE backups SET status = ?, error_message = ?, @@ -3737,13 +3982,13 @@ RETURNING id, schedule_id, target_id, status, size_bytes, started_at, completed_ type UpdateBackupFailedParams struct { Status string `json:"status"` - ErrorMessage sql.NullString `json:"error_message"` - CompletedAt sql.NullTime `json:"completed_at"` + ErrorMessage sql.NullString `json:"errorMessage"` + CompletedAt sql.NullTime `json:"completedAt"` ID int64 `json:"id"` } -func (q *Queries) UpdateBackupFailed(ctx context.Context, arg UpdateBackupFailedParams) (Backup, error) { - row := q.queryRow(ctx, q.updateBackupFailedStmt, updateBackupFailed, +func (q *Queries) UpdateBackupFailed(ctx context.Context, arg *UpdateBackupFailedParams) (*Backup, error) { + row := q.db.QueryRowContext(ctx, UpdateBackupFailed, arg.Status, arg.ErrorMessage, arg.CompletedAt, @@ -3762,10 +4007,10 @@ func (q *Queries) UpdateBackupFailed(ctx context.Context, arg 
UpdateBackupFailed &i.CreatedAt, &i.NotificationSent, ) - return i, err + return &i, err } -const updateBackupSchedule = `-- name: UpdateBackupSchedule :one +const UpdateBackupSchedule = `-- name: UpdateBackupSchedule :one UPDATE backup_schedules SET name = ?, description = ?, @@ -3781,15 +4026,15 @@ RETURNING id, name, description, cron_expression, target_id, retention_days, ena type UpdateBackupScheduleParams struct { Name string `json:"name"` Description sql.NullString `json:"description"` - CronExpression string `json:"cron_expression"` - TargetID int64 `json:"target_id"` - RetentionDays int64 `json:"retention_days"` + CronExpression string `json:"cronExpression"` + TargetID int64 `json:"targetId"` + RetentionDays int64 `json:"retentionDays"` Enabled bool `json:"enabled"` ID int64 `json:"id"` } -func (q *Queries) UpdateBackupSchedule(ctx context.Context, arg UpdateBackupScheduleParams) (BackupSchedule, error) { - row := q.queryRow(ctx, q.updateBackupScheduleStmt, updateBackupSchedule, +func (q *Queries) UpdateBackupSchedule(ctx context.Context, arg *UpdateBackupScheduleParams) (*BackupSchedule, error) { + row := q.db.QueryRowContext(ctx, UpdateBackupSchedule, arg.Name, arg.Description, arg.CronExpression, @@ -3812,10 +4057,10 @@ func (q *Queries) UpdateBackupSchedule(ctx context.Context, arg UpdateBackupSche &i.LastRunAt, &i.NextRunAt, ) - return i, err + return &i, err } -const updateBackupScheduleLastRun = `-- name: UpdateBackupScheduleLastRun :one +const UpdateBackupScheduleLastRun = `-- name: UpdateBackupScheduleLastRun :one UPDATE backup_schedules SET last_run_at = ?, next_run_at = ?, @@ -3825,13 +4070,13 @@ RETURNING id, name, description, cron_expression, target_id, retention_days, ena ` type UpdateBackupScheduleLastRunParams struct { - LastRunAt sql.NullTime `json:"last_run_at"` - NextRunAt sql.NullTime `json:"next_run_at"` + LastRunAt sql.NullTime `json:"lastRunAt"` + NextRunAt sql.NullTime `json:"nextRunAt"` ID int64 `json:"id"` } -func (q *Queries) 
UpdateBackupScheduleLastRun(ctx context.Context, arg UpdateBackupScheduleLastRunParams) (BackupSchedule, error) { - row := q.queryRow(ctx, q.updateBackupScheduleLastRunStmt, updateBackupScheduleLastRun, arg.LastRunAt, arg.NextRunAt, arg.ID) +func (q *Queries) UpdateBackupScheduleLastRun(ctx context.Context, arg *UpdateBackupScheduleLastRunParams) (*BackupSchedule, error) { + row := q.db.QueryRowContext(ctx, UpdateBackupScheduleLastRun, arg.LastRunAt, arg.NextRunAt, arg.ID) var i BackupSchedule err := row.Scan( &i.ID, @@ -3846,10 +4091,10 @@ func (q *Queries) UpdateBackupScheduleLastRun(ctx context.Context, arg UpdateBac &i.LastRunAt, &i.NextRunAt, ) - return i, err + return &i, err } -const updateBackupSize = `-- name: UpdateBackupSize :one +const UpdateBackupSize = `-- name: UpdateBackupSize :one UPDATE backups SET size_bytes = ? WHERE id = ? @@ -3857,12 +4102,12 @@ RETURNING id, schedule_id, target_id, status, size_bytes, started_at, completed_ ` type UpdateBackupSizeParams struct { - SizeBytes sql.NullInt64 `json:"size_bytes"` + SizeBytes sql.NullInt64 `json:"sizeBytes"` ID int64 `json:"id"` } -func (q *Queries) UpdateBackupSize(ctx context.Context, arg UpdateBackupSizeParams) (Backup, error) { - row := q.queryRow(ctx, q.updateBackupSizeStmt, updateBackupSize, arg.SizeBytes, arg.ID) +func (q *Queries) UpdateBackupSize(ctx context.Context, arg *UpdateBackupSizeParams) (*Backup, error) { + row := q.db.QueryRowContext(ctx, UpdateBackupSize, arg.SizeBytes, arg.ID) var i Backup err := row.Scan( &i.ID, @@ -3876,10 +4121,10 @@ func (q *Queries) UpdateBackupSize(ctx context.Context, arg UpdateBackupSizePara &i.CreatedAt, &i.NotificationSent, ) - return i, err + return &i, err } -const updateBackupStatus = `-- name: UpdateBackupStatus :one +const UpdateBackupStatus = `-- name: UpdateBackupStatus :one UPDATE backups SET status = ? WHERE id = ? 
@@ -3891,8 +4136,8 @@ type UpdateBackupStatusParams struct { ID int64 `json:"id"` } -func (q *Queries) UpdateBackupStatus(ctx context.Context, arg UpdateBackupStatusParams) (Backup, error) { - row := q.queryRow(ctx, q.updateBackupStatusStmt, updateBackupStatus, arg.Status, arg.ID) +func (q *Queries) UpdateBackupStatus(ctx context.Context, arg *UpdateBackupStatusParams) (*Backup, error) { + row := q.db.QueryRowContext(ctx, UpdateBackupStatus, arg.Status, arg.ID) var i Backup err := row.Scan( &i.ID, @@ -3906,10 +4151,10 @@ func (q *Queries) UpdateBackupStatus(ctx context.Context, arg UpdateBackupStatus &i.CreatedAt, &i.NotificationSent, ) - return i, err + return &i, err } -const updateBackupTarget = `-- name: UpdateBackupTarget :one +const UpdateBackupTarget = `-- name: UpdateBackupTarget :one UPDATE backup_targets SET name = ?, type = ?, @@ -3928,18 +4173,18 @@ RETURNING id, name, bucket_name, region, endpoint, bucket_path, access_key_id, s type UpdateBackupTargetParams struct { Name string `json:"name"` Type string `json:"type"` - BucketName sql.NullString `json:"bucket_name"` + BucketName sql.NullString `json:"bucketName"` Region sql.NullString `json:"region"` Endpoint sql.NullString `json:"endpoint"` - BucketPath sql.NullString `json:"bucket_path"` - AccessKeyID sql.NullString `json:"access_key_id"` - SecretKey sql.NullString `json:"secret_key"` - S3PathStyle sql.NullBool `json:"s3_path_style"` + BucketPath sql.NullString `json:"bucketPath"` + AccessKeyID sql.NullString `json:"accessKeyId"` + SecretKey sql.NullString `json:"secretKey"` + S3PathStyle sql.NullBool `json:"s3PathStyle"` ID int64 `json:"id"` } -func (q *Queries) UpdateBackupTarget(ctx context.Context, arg UpdateBackupTargetParams) (BackupTarget, error) { - row := q.queryRow(ctx, q.updateBackupTargetStmt, updateBackupTarget, +func (q *Queries) UpdateBackupTarget(ctx context.Context, arg *UpdateBackupTargetParams) (*BackupTarget, error) { + row := q.db.QueryRowContext(ctx, UpdateBackupTarget, arg.Name, 
arg.Type, arg.BucketName, @@ -3967,14 +4212,55 @@ func (q *Queries) UpdateBackupTarget(ctx context.Context, arg UpdateBackupTarget &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err +} + +const UpdateDeploymentConfig = `-- name: UpdateDeploymentConfig :one +UPDATE nodes +SET deployment_config = ?, + updated_at = CURRENT_TIMESTAMP +WHERE id = ? +RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message +` + +type UpdateDeploymentConfigParams struct { + DeploymentConfig sql.NullString `json:"deploymentConfig"` + ID int64 `json:"id"` } -const updateFabricOrganization = `-- name: UpdateFabricOrganization :one +func (q *Queries) UpdateDeploymentConfig(ctx context.Context, arg *UpdateDeploymentConfigParams) (*Node, error) { + row := q.db.QueryRowContext(ctx, UpdateDeploymentConfig, arg.DeploymentConfig, arg.ID) + var i Node + err := row.Scan( + &i.ID, + &i.Name, + &i.Slug, + &i.Platform, + &i.Status, + &i.Description, + &i.NetworkID, + &i.Config, + &i.Resources, + &i.Endpoint, + &i.PublicEndpoint, + &i.P2pAddress, + &i.CreatedAt, + &i.CreatedBy, + &i.UpdatedAt, + &i.FabricOrganizationID, + &i.NodeType, + &i.NodeConfig, + &i.DeploymentConfig, + &i.ErrorMessage, + ) + return &i, err +} + +const UpdateFabricOrganization = `-- name: UpdateFabricOrganization :one UPDATE fabric_organizations SET description = ? WHERE id = ? 
-RETURNING id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at +RETURNING id, msp_id, description, config, ca_config, sign_key_id, tls_root_key_id, admin_tls_key_id, admin_sign_key_id, client_sign_key_id, provider_id, created_at, created_by, updated_at, crl_key_id, crl_last_update ` type UpdateFabricOrganizationParams struct { @@ -3982,8 +4268,8 @@ type UpdateFabricOrganizationParams struct { ID int64 `json:"id"` } -func (q *Queries) UpdateFabricOrganization(ctx context.Context, arg UpdateFabricOrganizationParams) (FabricOrganization, error) { - row := q.queryRow(ctx, q.updateFabricOrganizationStmt, updateFabricOrganization, arg.Description, arg.ID) +func (q *Queries) UpdateFabricOrganization(ctx context.Context, arg *UpdateFabricOrganizationParams) (*FabricOrganization, error) { + row := q.db.QueryRowContext(ctx, UpdateFabricOrganization, arg.Description, arg.ID) var i FabricOrganization err := row.Scan( &i.ID, @@ -4000,11 +4286,13 @@ func (q *Queries) UpdateFabricOrganization(ctx context.Context, arg UpdateFabric &i.CreatedAt, &i.CreatedBy, &i.UpdatedAt, + &i.CrlKeyID, + &i.CrlLastUpdate, ) - return i, err + return &i, err } -const updateKey = `-- name: UpdateKey :one +const UpdateKey = `-- name: UpdateKey :one UPDATE keys SET name = ?, description = ?, @@ -4022,7 +4310,8 @@ SET name = ?, provider_id = ?, user_id = ?, ethereum_address = ?, - updated_at = CURRENT_TIMESTAMP + updated_at = CURRENT_TIMESTAMP, + signing_key_id = ? WHERE id = ? 
RETURNING id, name, description, algorithm, key_size, curve, format, public_key, private_key, certificate, status, created_at, updated_at, expires_at, last_rotated_at, signing_key_id, sha256_fingerprint, sha1_fingerprint, provider_id, user_id, is_ca, ethereum_address ` @@ -4031,24 +4320,25 @@ type UpdateKeyParams struct { Name string `json:"name"` Description sql.NullString `json:"description"` Algorithm string `json:"algorithm"` - KeySize sql.NullInt64 `json:"key_size"` + KeySize sql.NullInt64 `json:"keySize"` Curve sql.NullString `json:"curve"` Format string `json:"format"` - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"publicKey"` + PrivateKey string `json:"privateKey"` Certificate sql.NullString `json:"certificate"` Status string `json:"status"` - ExpiresAt sql.NullTime `json:"expires_at"` - Sha256Fingerprint string `json:"sha256_fingerprint"` - Sha1Fingerprint string `json:"sha1_fingerprint"` - ProviderID int64 `json:"provider_id"` - UserID int64 `json:"user_id"` - EthereumAddress sql.NullString `json:"ethereum_address"` + ExpiresAt sql.NullTime `json:"expiresAt"` + Sha256Fingerprint string `json:"sha256Fingerprint"` + Sha1Fingerprint string `json:"sha1Fingerprint"` + ProviderID int64 `json:"providerId"` + UserID int64 `json:"userId"` + EthereumAddress sql.NullString `json:"ethereumAddress"` + SigningKeyID sql.NullInt64 `json:"signingKeyId"` ID int64 `json:"id"` } -func (q *Queries) UpdateKey(ctx context.Context, arg UpdateKeyParams) (Key, error) { - row := q.queryRow(ctx, q.updateKeyStmt, updateKey, +func (q *Queries) UpdateKey(ctx context.Context, arg *UpdateKeyParams) (*Key, error) { + row := q.db.QueryRowContext(ctx, UpdateKey, arg.Name, arg.Description, arg.Algorithm, @@ -4065,6 +4355,7 @@ func (q *Queries) UpdateKey(ctx context.Context, arg UpdateKeyParams) (Key, erro arg.ProviderID, arg.UserID, arg.EthereumAddress, + arg.SigningKeyID, arg.ID, ) var i Key @@ -4092,10 +4383,10 @@ func (q *Queries) 
UpdateKey(ctx context.Context, arg UpdateKeyParams) (Key, erro &i.IsCa, &i.EthereumAddress, ) - return i, err + return &i, err } -const updateKeyProvider = `-- name: UpdateKeyProvider :one +const UpdateKeyProvider = `-- name: UpdateKeyProvider :one UPDATE key_providers SET name = ?, type = ?, @@ -4109,13 +4400,13 @@ RETURNING id, name, type, is_default, config, created_at, updated_at type UpdateKeyProviderParams struct { Name string `json:"name"` Type string `json:"type"` - IsDefault int64 `json:"is_default"` + IsDefault int64 `json:"isDefault"` Config string `json:"config"` ID int64 `json:"id"` } -func (q *Queries) UpdateKeyProvider(ctx context.Context, arg UpdateKeyProviderParams) (KeyProvider, error) { - row := q.queryRow(ctx, q.updateKeyProviderStmt, updateKeyProvider, +func (q *Queries) UpdateKeyProvider(ctx context.Context, arg *UpdateKeyProviderParams) (*KeyProvider, error) { + row := q.db.QueryRowContext(ctx, UpdateKeyProvider, arg.Name, arg.Type, arg.IsDefault, @@ -4132,10 +4423,10 @@ func (q *Queries) UpdateKeyProvider(ctx context.Context, arg UpdateKeyProviderPa &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const updateNetworkCurrentConfigBlock = `-- name: UpdateNetworkCurrentConfigBlock :exec +const UpdateNetworkCurrentConfigBlock = `-- name: UpdateNetworkCurrentConfigBlock :exec UPDATE networks SET current_config_block_b64 = ?, updated_at = CURRENT_TIMESTAMP @@ -4143,16 +4434,16 @@ WHERE id = ? 
` type UpdateNetworkCurrentConfigBlockParams struct { - CurrentConfigBlockB64 sql.NullString `json:"current_config_block_b64"` + CurrentConfigBlockB64 sql.NullString `json:"currentConfigBlockB64"` ID int64 `json:"id"` } -func (q *Queries) UpdateNetworkCurrentConfigBlock(ctx context.Context, arg UpdateNetworkCurrentConfigBlockParams) error { - _, err := q.exec(ctx, q.updateNetworkCurrentConfigBlockStmt, updateNetworkCurrentConfigBlock, arg.CurrentConfigBlockB64, arg.ID) +func (q *Queries) UpdateNetworkCurrentConfigBlock(ctx context.Context, arg *UpdateNetworkCurrentConfigBlockParams) error { + _, err := q.db.ExecContext(ctx, UpdateNetworkCurrentConfigBlock, arg.CurrentConfigBlockB64, arg.ID) return err } -const updateNetworkGenesisBlock = `-- name: UpdateNetworkGenesisBlock :one +const UpdateNetworkGenesisBlock = `-- name: UpdateNetworkGenesisBlock :one UPDATE networks SET genesis_block_b64 = ?, updated_at = CURRENT_TIMESTAMP @@ -4161,12 +4452,12 @@ RETURNING id, name, network_id, platform, status, description, config, deploymen ` type UpdateNetworkGenesisBlockParams struct { - GenesisBlockB64 sql.NullString `json:"genesis_block_b64"` + GenesisBlockB64 sql.NullString `json:"genesisBlockB64"` ID int64 `json:"id"` } -func (q *Queries) UpdateNetworkGenesisBlock(ctx context.Context, arg UpdateNetworkGenesisBlockParams) (Network, error) { - row := q.queryRow(ctx, q.updateNetworkGenesisBlockStmt, updateNetworkGenesisBlock, arg.GenesisBlockB64, arg.ID) +func (q *Queries) UpdateNetworkGenesisBlock(ctx context.Context, arg *UpdateNetworkGenesisBlockParams) (*Network, error) { + row := q.db.QueryRowContext(ctx, UpdateNetworkGenesisBlock, arg.GenesisBlockB64, arg.ID) var i Network err := row.Scan( &i.ID, @@ -4185,10 +4476,10 @@ func (q *Queries) UpdateNetworkGenesisBlock(ctx context.Context, arg UpdateNetwo &i.GenesisBlockB64, &i.CurrentConfigBlockB64, ) - return i, err + return &i, err } -const updateNetworkNodeRole = `-- name: UpdateNetworkNodeRole :one +const 
UpdateNetworkNodeRole = `-- name: UpdateNetworkNodeRole :one UPDATE network_nodes SET role = ?, updated_at = CURRENT_TIMESTAMP @@ -4198,12 +4489,12 @@ RETURNING id, network_id, node_id, role, status, config, created_at, updated_at type UpdateNetworkNodeRoleParams struct { Role string `json:"role"` - NetworkID int64 `json:"network_id"` - NodeID int64 `json:"node_id"` + NetworkID int64 `json:"networkId"` + NodeID int64 `json:"nodeId"` } -func (q *Queries) UpdateNetworkNodeRole(ctx context.Context, arg UpdateNetworkNodeRoleParams) (NetworkNode, error) { - row := q.queryRow(ctx, q.updateNetworkNodeRoleStmt, updateNetworkNodeRole, arg.Role, arg.NetworkID, arg.NodeID) +func (q *Queries) UpdateNetworkNodeRole(ctx context.Context, arg *UpdateNetworkNodeRoleParams) (*NetworkNode, error) { + row := q.db.QueryRowContext(ctx, UpdateNetworkNodeRole, arg.Role, arg.NetworkID, arg.NodeID) var i NetworkNode err := row.Scan( &i.ID, @@ -4215,10 +4506,10 @@ func (q *Queries) UpdateNetworkNodeRole(ctx context.Context, arg UpdateNetworkNo &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const updateNetworkNodeStatus = `-- name: UpdateNetworkNodeStatus :one +const UpdateNetworkNodeStatus = `-- name: UpdateNetworkNodeStatus :one UPDATE network_nodes SET status = ?, updated_at = CURRENT_TIMESTAMP @@ -4228,12 +4519,12 @@ RETURNING id, network_id, node_id, role, status, config, created_at, updated_at type UpdateNetworkNodeStatusParams struct { Status string `json:"status"` - NetworkID int64 `json:"network_id"` - NodeID int64 `json:"node_id"` + NetworkID int64 `json:"networkId"` + NodeID int64 `json:"nodeId"` } -func (q *Queries) UpdateNetworkNodeStatus(ctx context.Context, arg UpdateNetworkNodeStatusParams) (NetworkNode, error) { - row := q.queryRow(ctx, q.updateNetworkNodeStatusStmt, updateNetworkNodeStatus, arg.Status, arg.NetworkID, arg.NodeID) +func (q *Queries) UpdateNetworkNodeStatus(ctx context.Context, arg *UpdateNetworkNodeStatusParams) (*NetworkNode, error) { + row 
:= q.db.QueryRowContext(ctx, UpdateNetworkNodeStatus, arg.Status, arg.NetworkID, arg.NodeID) var i NetworkNode err := row.Scan( &i.ID, @@ -4245,10 +4536,10 @@ func (q *Queries) UpdateNetworkNodeStatus(ctx context.Context, arg UpdateNetwork &i.CreatedAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const updateNetworkStatus = `-- name: UpdateNetworkStatus :exec +const UpdateNetworkStatus = `-- name: UpdateNetworkStatus :exec UPDATE networks SET status = ?, updated_at = CURRENT_TIMESTAMP @@ -4260,26 +4551,67 @@ type UpdateNetworkStatusParams struct { ID int64 `json:"id"` } -func (q *Queries) UpdateNetworkStatus(ctx context.Context, arg UpdateNetworkStatusParams) error { - _, err := q.exec(ctx, q.updateNetworkStatusStmt, updateNetworkStatus, arg.Status, arg.ID) +func (q *Queries) UpdateNetworkStatus(ctx context.Context, arg *UpdateNetworkStatusParams) error { + _, err := q.db.ExecContext(ctx, UpdateNetworkStatus, arg.Status, arg.ID) return err } -const updateNodeDeploymentConfig = `-- name: UpdateNodeDeploymentConfig :one +const UpdateNodeConfig = `-- name: UpdateNodeConfig :one +UPDATE nodes +SET node_config = ?, + updated_at = CURRENT_TIMESTAMP +WHERE id = ? 
+RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message +` + +type UpdateNodeConfigParams struct { + NodeConfig sql.NullString `json:"nodeConfig"` + ID int64 `json:"id"` +} + +func (q *Queries) UpdateNodeConfig(ctx context.Context, arg *UpdateNodeConfigParams) (*Node, error) { + row := q.db.QueryRowContext(ctx, UpdateNodeConfig, arg.NodeConfig, arg.ID) + var i Node + err := row.Scan( + &i.ID, + &i.Name, + &i.Slug, + &i.Platform, + &i.Status, + &i.Description, + &i.NetworkID, + &i.Config, + &i.Resources, + &i.Endpoint, + &i.PublicEndpoint, + &i.P2pAddress, + &i.CreatedAt, + &i.CreatedBy, + &i.UpdatedAt, + &i.FabricOrganizationID, + &i.NodeType, + &i.NodeConfig, + &i.DeploymentConfig, + &i.ErrorMessage, + ) + return &i, err +} + +const UpdateNodeDeploymentConfig = `-- name: UpdateNodeDeploymentConfig :one UPDATE nodes SET deployment_config = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ? 
-RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config +RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message ` type UpdateNodeDeploymentConfigParams struct { - DeploymentConfig sql.NullString `json:"deployment_config"` + DeploymentConfig sql.NullString `json:"deploymentConfig"` ID int64 `json:"id"` } -func (q *Queries) UpdateNodeDeploymentConfig(ctx context.Context, arg UpdateNodeDeploymentConfigParams) (Node, error) { - row := q.queryRow(ctx, q.updateNodeDeploymentConfigStmt, updateNodeDeploymentConfig, arg.DeploymentConfig, arg.ID) +func (q *Queries) UpdateNodeDeploymentConfig(ctx context.Context, arg *UpdateNodeDeploymentConfigParams) (*Node, error) { + row := q.db.QueryRowContext(ctx, UpdateNodeDeploymentConfig, arg.DeploymentConfig, arg.ID) var i Node err := row.Scan( &i.ID, @@ -4301,16 +4633,17 @@ func (q *Queries) UpdateNodeDeploymentConfig(ctx context.Context, arg UpdateNode &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ) - return i, err + return &i, err } -const updateNodeEndpoint = `-- name: UpdateNodeEndpoint :one +const UpdateNodeEndpoint = `-- name: UpdateNodeEndpoint :one UPDATE nodes SET endpoint = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ? 
-RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config +RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message ` type UpdateNodeEndpointParams struct { @@ -4318,8 +4651,8 @@ type UpdateNodeEndpointParams struct { ID int64 `json:"id"` } -func (q *Queries) UpdateNodeEndpoint(ctx context.Context, arg UpdateNodeEndpointParams) (Node, error) { - row := q.queryRow(ctx, q.updateNodeEndpointStmt, updateNodeEndpoint, arg.Endpoint, arg.ID) +func (q *Queries) UpdateNodeEndpoint(ctx context.Context, arg *UpdateNodeEndpointParams) (*Node, error) { + row := q.db.QueryRowContext(ctx, UpdateNodeEndpoint, arg.Endpoint, arg.ID) var i Node err := row.Scan( &i.ID, @@ -4341,25 +4674,26 @@ func (q *Queries) UpdateNodeEndpoint(ctx context.Context, arg UpdateNodeEndpoint &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ) - return i, err + return &i, err } -const updateNodePublicEndpoint = `-- name: UpdateNodePublicEndpoint :one +const UpdateNodePublicEndpoint = `-- name: UpdateNodePublicEndpoint :one UPDATE nodes SET public_endpoint = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ? 
-RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config +RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message ` type UpdateNodePublicEndpointParams struct { - PublicEndpoint sql.NullString `json:"public_endpoint"` + PublicEndpoint sql.NullString `json:"publicEndpoint"` ID int64 `json:"id"` } -func (q *Queries) UpdateNodePublicEndpoint(ctx context.Context, arg UpdateNodePublicEndpointParams) (Node, error) { - row := q.queryRow(ctx, q.updateNodePublicEndpointStmt, updateNodePublicEndpoint, arg.PublicEndpoint, arg.ID) +func (q *Queries) UpdateNodePublicEndpoint(ctx context.Context, arg *UpdateNodePublicEndpointParams) (*Node, error) { + row := q.db.QueryRowContext(ctx, UpdateNodePublicEndpoint, arg.PublicEndpoint, arg.ID) var i Node err := row.Scan( &i.ID, @@ -4381,16 +4715,18 @@ func (q *Queries) UpdateNodePublicEndpoint(ctx context.Context, arg UpdateNodePu &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ) - return i, err + return &i, err } -const updateNodeStatus = `-- name: UpdateNodeStatus :one +const UpdateNodeStatus = `-- name: UpdateNodeStatus :one UPDATE nodes SET status = ?, + error_message = NULL, updated_at = CURRENT_TIMESTAMP WHERE id = ? 
-RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config +RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message ` type UpdateNodeStatusParams struct { @@ -4398,8 +4734,51 @@ type UpdateNodeStatusParams struct { ID int64 `json:"id"` } -func (q *Queries) UpdateNodeStatus(ctx context.Context, arg UpdateNodeStatusParams) (Node, error) { - row := q.queryRow(ctx, q.updateNodeStatusStmt, updateNodeStatus, arg.Status, arg.ID) +func (q *Queries) UpdateNodeStatus(ctx context.Context, arg *UpdateNodeStatusParams) (*Node, error) { + row := q.db.QueryRowContext(ctx, UpdateNodeStatus, arg.Status, arg.ID) + var i Node + err := row.Scan( + &i.ID, + &i.Name, + &i.Slug, + &i.Platform, + &i.Status, + &i.Description, + &i.NetworkID, + &i.Config, + &i.Resources, + &i.Endpoint, + &i.PublicEndpoint, + &i.P2pAddress, + &i.CreatedAt, + &i.CreatedBy, + &i.UpdatedAt, + &i.FabricOrganizationID, + &i.NodeType, + &i.NodeConfig, + &i.DeploymentConfig, + &i.ErrorMessage, + ) + return &i, err +} + +const UpdateNodeStatusWithError = `-- name: UpdateNodeStatusWithError :one +UPDATE nodes +SET status = ?, + error_message = ?, + updated_at = CURRENT_TIMESTAMP +WHERE id = ? 
+RETURNING id, name, slug, platform, status, description, network_id, config, resources, endpoint, public_endpoint, p2p_address, created_at, created_by, updated_at, fabric_organization_id, node_type, node_config, deployment_config, error_message +` + +type UpdateNodeStatusWithErrorParams struct { + Status string `json:"status"` + ErrorMessage sql.NullString `json:"errorMessage"` + ID int64 `json:"id"` +} + +func (q *Queries) UpdateNodeStatusWithError(ctx context.Context, arg *UpdateNodeStatusWithErrorParams) (*Node, error) { + row := q.db.QueryRowContext(ctx, UpdateNodeStatusWithError, arg.Status, arg.ErrorMessage, arg.ID) var i Node err := row.Scan( &i.ID, @@ -4421,11 +4800,12 @@ func (q *Queries) UpdateNodeStatus(ctx context.Context, arg UpdateNodeStatusPara &i.NodeType, &i.NodeConfig, &i.DeploymentConfig, + &i.ErrorMessage, ) - return i, err + return &i, err } -const updateNotificationProvider = `-- name: UpdateNotificationProvider :one +const UpdateNotificationProvider = `-- name: UpdateNotificationProvider :one UPDATE notification_providers SET type = ?, name = ?, @@ -4444,16 +4824,16 @@ type UpdateNotificationProviderParams struct { Type string `json:"type"` Name string `json:"name"` Config string `json:"config"` - IsDefault bool `json:"is_default"` - NotifyNodeDowntime bool `json:"notify_node_downtime"` - NotifyBackupSuccess bool `json:"notify_backup_success"` - NotifyBackupFailure bool `json:"notify_backup_failure"` - NotifyS3ConnectionIssue bool `json:"notify_s3_connection_issue"` + IsDefault bool `json:"isDefault"` + NotifyNodeDowntime bool `json:"notifyNodeDowntime"` + NotifyBackupSuccess bool `json:"notifyBackupSuccess"` + NotifyBackupFailure bool `json:"notifyBackupFailure"` + NotifyS3ConnectionIssue bool `json:"notifyS3ConnectionIssue"` ID int64 `json:"id"` } -func (q *Queries) UpdateNotificationProvider(ctx context.Context, arg UpdateNotificationProviderParams) (NotificationProvider, error) { - row := q.queryRow(ctx, q.updateNotificationProviderStmt, 
updateNotificationProvider, +func (q *Queries) UpdateNotificationProvider(ctx context.Context, arg *UpdateNotificationProviderParams) (*NotificationProvider, error) { + row := q.db.QueryRowContext(ctx, UpdateNotificationProvider, arg.Type, arg.Name, arg.Config, @@ -4482,10 +4862,28 @@ func (q *Queries) UpdateNotificationProvider(ctx context.Context, arg UpdateNoti &i.LastTestStatus, &i.LastTestMessage, ) - return i, err + return &i, err +} + +const UpdateOrganizationCRL = `-- name: UpdateOrganizationCRL :exec +UPDATE fabric_organizations +SET crl_last_update = ?, + crl_key_id = ? +WHERE id = ? +` + +type UpdateOrganizationCRLParams struct { + CrlLastUpdate sql.NullTime `json:"crlLastUpdate"` + CrlKeyID sql.NullInt64 `json:"crlKeyId"` + ID int64 `json:"id"` +} + +func (q *Queries) UpdateOrganizationCRL(ctx context.Context, arg *UpdateOrganizationCRLParams) error { + _, err := q.db.ExecContext(ctx, UpdateOrganizationCRL, arg.CrlLastUpdate, arg.CrlKeyID, arg.ID) + return err } -const updateProviderTestResults = `-- name: UpdateProviderTestResults :one +const UpdateProviderTestResults = `-- name: UpdateProviderTestResults :one UPDATE notification_providers SET last_test_at = ?, last_test_status = ?, @@ -4496,14 +4894,14 @@ RETURNING id, name, type, config, is_default, is_enabled, created_at, updated_at ` type UpdateProviderTestResultsParams struct { - LastTestAt sql.NullTime `json:"last_test_at"` - LastTestStatus sql.NullString `json:"last_test_status"` - LastTestMessage sql.NullString `json:"last_test_message"` + LastTestAt sql.NullTime `json:"lastTestAt"` + LastTestStatus sql.NullString `json:"lastTestStatus"` + LastTestMessage sql.NullString `json:"lastTestMessage"` ID int64 `json:"id"` } -func (q *Queries) UpdateProviderTestResults(ctx context.Context, arg UpdateProviderTestResultsParams) (NotificationProvider, error) { - row := q.queryRow(ctx, q.updateProviderTestResultsStmt, updateProviderTestResults, +func (q *Queries) UpdateProviderTestResults(ctx 
context.Context, arg *UpdateProviderTestResultsParams) (*NotificationProvider, error) { + row := q.db.QueryRowContext(ctx, UpdateProviderTestResults, arg.LastTestAt, arg.LastTestStatus, arg.LastTestMessage, @@ -4527,10 +4925,35 @@ func (q *Queries) UpdateProviderTestResults(ctx context.Context, arg UpdateProvi &i.LastTestStatus, &i.LastTestMessage, ) - return i, err + return &i, err +} + +const UpdateSetting = `-- name: UpdateSetting :one +UPDATE settings +SET config = ?, + updated_at = CURRENT_TIMESTAMP +WHERE id = ? +RETURNING id, config, created_at, updated_at +` + +type UpdateSettingParams struct { + Config string `json:"config"` + ID int64 `json:"id"` +} + +func (q *Queries) UpdateSetting(ctx context.Context, arg *UpdateSettingParams) (*Setting, error) { + row := q.db.QueryRowContext(ctx, UpdateSetting, arg.Config, arg.ID) + var i Setting + err := row.Scan( + &i.ID, + &i.Config, + &i.CreatedAt, + &i.UpdatedAt, + ) + return &i, err } -const updateUser = `-- name: UpdateUser :one +const UpdateUser = `-- name: UpdateUser :one UPDATE users SET username = ?, password = CASE WHEN ? IS NOT NULL THEN ? 
ELSE password END, @@ -4541,13 +4964,13 @@ RETURNING id, username, password, name, email, role, provider, provider_id, avat type UpdateUserParams struct { Username string `json:"username"` - Column2 interface{} `json:"column_2"` + Column2 interface{} `json:"column2"` Password string `json:"password"` ID int64 `json:"id"` } -func (q *Queries) UpdateUser(ctx context.Context, arg UpdateUserParams) (User, error) { - row := q.queryRow(ctx, q.updateUserStmt, updateUser, +func (q *Queries) UpdateUser(ctx context.Context, arg *UpdateUserParams) (*User, error) { + row := q.db.QueryRowContext(ctx, UpdateUser, arg.Username, arg.Column2, arg.Password, @@ -4568,10 +4991,10 @@ func (q *Queries) UpdateUser(ctx context.Context, arg UpdateUserParams) (User, e &i.LastLoginAt, &i.UpdatedAt, ) - return i, err + return &i, err } -const updateUserLastLogin = `-- name: UpdateUserLastLogin :one +const UpdateUserLastLogin = `-- name: UpdateUserLastLogin :one UPDATE users SET last_login_at = CURRENT_TIMESTAMP, updated_at = CURRENT_TIMESTAMP @@ -4579,8 +5002,8 @@ WHERE id = ? 
RETURNING id, username, password, name, email, role, provider, provider_id, avatar_url, created_at, last_login_at, updated_at ` -func (q *Queries) UpdateUserLastLogin(ctx context.Context, id int64) (User, error) { - row := q.queryRow(ctx, q.updateUserLastLoginStmt, updateUserLastLogin, id) +func (q *Queries) UpdateUserLastLogin(ctx context.Context, id int64) (*User, error) { + row := q.db.QueryRowContext(ctx, UpdateUserLastLogin, id) var i User err := row.Scan( &i.ID, @@ -4596,5 +5019,5 @@ func (q *Queries) UpdateUserLastLogin(ctx context.Context, id int64) (User, erro &i.LastLoginAt, &i.UpdatedAt, ) - return i, err + return &i, err } diff --git a/pkg/db/schema.sql b/pkg/db/schema.sql index c8b3559..2796f19 100644 --- a/pkg/db/schema.sql +++ b/pkg/db/schema.sql @@ -184,7 +184,15 @@ CREATE TABLE IF NOT EXISTS proposal_submission_notifications ( tx_id TEXT NOT NULL, submitted_by TEXT NOT NULL, submitted_at TIMESTAMP NOT NULL, - created_at TIMESTAMP NOT NULL DEFAULT NOW(), + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (proposal_id) REFERENCES governance_proposals(proposal_id) ON DELETE CASCADE, FOREIGN KEY (network_id) REFERENCES networks(id) ON DELETE CASCADE -); \ No newline at end of file +); + +-- Settings table for storing JSON configurations +CREATE TABLE settings ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + config JSON NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index 1a7a4c8..38acf0d 100644 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -2,6 +2,8 @@ package errors import ( "fmt" + + "github.com/pkg/errors" ) type ErrorType string @@ -85,7 +87,7 @@ func NewConflictError(msg string, details map[string]interface{}) *AppError { func NewInternalError(msg string, err error, details map[string]interface{}) *AppError { return &AppError{ Type: InternalError, - Message: msg, + Message: 
errors.Wrap(err, msg).Error(), Details: details, Err: err, } diff --git a/pkg/fabric/channel/channel.go b/pkg/fabric/channel/channel.go index 7f2078f..fc1034f 100644 --- a/pkg/fabric/channel/channel.go +++ b/pkg/fabric/channel/channel.go @@ -8,16 +8,17 @@ import ( "fmt" "crypto/x509" + "crypto/x509/pkix" "time" "github.com/hyperledger/fabric-config/configtx" "github.com/hyperledger/fabric-config/configtx/membership" "github.com/hyperledger/fabric-config/configtx/orderer" "github.com/hyperledger/fabric-config/protolator" - cb "github.com/hyperledger/fabric-protos-go/common" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/chainlaunch/chainlaunch/internal/protoutil" "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/protoutil" ) // ChannelService handles channel operations @@ -87,8 +88,55 @@ func (s *ChannelService) CreateChannel(input CreateChannelInput) (*CreateChannel }, nil } +// SetCRLInput represents the input for setting CRL +type SetCRLInput struct { + CurrentConfig *cb.Config + CRL []byte + MSPID string + ChannelName string +} + +// SetCRL updates the CRL for an organization in a channel +func (s *ChannelService) SetCRL(input *SetCRLInput) (*cb.Envelope, error) { + // Create config manager and update CRL + cftxGen := configtx.New(input.CurrentConfig) + org, err := cftxGen.Application().Organization(input.MSPID).Configuration() + if err != nil { + return nil, fmt.Errorf("failed to get organization configuration: %w", err) + } + + crl, err := ParseCRL(input.CRL) + if err != nil { + return nil, fmt.Errorf("failed to parse CRL: %w", err) + } + org.MSP.RevocationList = []*pkix.CertificateList{crl} + err = cftxGen.Application().SetOrganization(org) + if err != nil { + return nil, fmt.Errorf("failed to set organization configuration: %w", err) + } + + // Compute update + configUpdateBytes, err := cftxGen.ComputeMarshaledUpdate(input.ChannelName) + if err != nil { + return nil, fmt.Errorf("failed to compute update: %w", err) 
+ } + + configUpdate := &cb.ConfigUpdate{} + if err := proto.Unmarshal(configUpdateBytes, configUpdate); err != nil { + return nil, fmt.Errorf("failed to unmarshal config update: %w", err) + } + + // Create envelope + configEnvelope, err := s.createConfigUpdateEnvelope(input.ChannelName, configUpdate) + if err != nil { + return nil, fmt.Errorf("failed to create config update envelope: %w", err) + } + + return configEnvelope, nil +} + // SetAnchorPeers updates the anchor peers for an organization in a channel -func (s *ChannelService) SetAnchorPeers(input *SetAnchorPeersInput) ([]byte, error) { +func (s *ChannelService) SetAnchorPeers(input *SetAnchorPeersInput) (*cb.Envelope, error) { // Create config manager and update anchor peers cftxGen := configtx.New(input.CurrentConfig) app := cftxGen.Application().Organization(input.MSPID) @@ -104,7 +152,7 @@ func (s *ChannelService) SetAnchorPeers(input *SetAnchorPeersInput) ([]byte, err Host: ap.Host, Port: ap.Port, }); err != nil { - return nil, fmt.Errorf("failed to remove anchor peer: %w", err) + continue } } @@ -130,14 +178,14 @@ func (s *ChannelService) SetAnchorPeers(input *SetAnchorPeersInput) ([]byte, err } // Create envelope - envelopeBytes, err := s.createConfigUpdateEnvelope(input.ChannelName, configUpdate) + configEnvelope, err := s.createConfigUpdateEnvelope(input.ChannelName, configUpdate) if err != nil { return nil, fmt.Errorf("failed to create config update envelope: %w", err) } - return envelopeBytes, nil + return configEnvelope, nil } -func (s *ChannelService) createConfigUpdateEnvelope(channelID string, configUpdate *cb.ConfigUpdate) ([]byte, error) { +func (s *ChannelService) createConfigUpdateEnvelope(channelID string, configUpdate *cb.ConfigUpdate) (*cb.Envelope, error) { configUpdate.ChannelId = channelID configUpdateData, err := proto.Marshal(configUpdate) if err != nil { @@ -149,11 +197,8 @@ func (s *ChannelService) createConfigUpdateEnvelope(channelID string, configUpda if err != nil { return 
nil, err } - envelopeData, err := proto.Marshal(envelope) - if err != nil { - return nil, err - } - return envelopeData, nil + + return envelope, nil } // DecodeBlock decodes a base64 encoded block into JSON @@ -233,6 +278,12 @@ func (s *ChannelService) parseAndCreateChannel(input CreateChannelInput) ([]byte OrganizationalUnitIdentifier: "orderer", }, }, + Admins: []*x509.Certificate{}, + IntermediateCerts: []*x509.Certificate{}, + RevocationList: []*pkix.CertificateList{}, + OrganizationalUnitIdentifiers: []membership.OUIdentifier{}, + CryptoConfig: membership.CryptoConfig{}, + TLSIntermediateCerts: []*x509.Certificate{}, }, Policies: map[string]configtx.Policy{ "Admins": { @@ -254,6 +305,7 @@ func (s *ChannelService) parseAndCreateChannel(input CreateChannelInput) ([]byte }, AnchorPeers: anchorPeers, OrdererEndpoints: org.OrdererEndpoints, + ModPolicy: "", } peerOrgs = append(peerOrgs, peerOrg) @@ -297,6 +349,12 @@ func (s *ChannelService) parseAndCreateChannel(input CreateChannelInput) ([]byte OrganizationalUnitIdentifier: "peer", }, }, + Admins: []*x509.Certificate{}, + IntermediateCerts: []*x509.Certificate{}, + RevocationList: []*pkix.CertificateList{}, + OrganizationalUnitIdentifiers: []membership.OUIdentifier{}, + CryptoConfig: membership.CryptoConfig{}, + TLSIntermediateCerts: []*x509.Certificate{}, }, Policies: map[string]configtx.Policy{ "Admins": { @@ -311,8 +369,13 @@ func (s *ChannelService) parseAndCreateChannel(input CreateChannelInput) ([]byte Type: "Signature", Rule: fmt.Sprintf("OR('%s.member')", org.Name), }, + "Endorsement": { + Type: "Signature", + Rule: fmt.Sprintf("OR('%s.member')", org.Name), + }, }, OrdererEndpoints: org.OrdererEndpoints, + ModPolicy: "", } ordererOrgs = append(ordererOrgs, ordererOrg) @@ -527,3 +590,17 @@ func defaultACLs() map[string]string { "event/FilteredBlock": "/Channel/Application/Readers", } } + +func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) { + block, _ := pem.Decode(crlBytes) + if block == nil { 
+ return nil, fmt.Errorf("failed to decode PEM block containing CRL") + } + + crl, err := x509.ParseCRL(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse CRL: %v", err) + } + + return crl, nil +} diff --git a/pkg/fabric/handler/organization_handler.go b/pkg/fabric/handler/organization_handler.go index 1ca7f1c..818a079 100644 --- a/pkg/fabric/handler/organization_handler.go +++ b/pkg/fabric/handler/organization_handler.go @@ -1,13 +1,18 @@ package handler import ( + "crypto/x509" "encoding/json" + "encoding/pem" + "math/big" "net/http" "strconv" + "time" + "github.com/chainlaunch/chainlaunch/pkg/errors" "github.com/chainlaunch/chainlaunch/pkg/fabric/service" + "github.com/chainlaunch/chainlaunch/pkg/http/response" "github.com/go-chi/chi/v5" - "github.com/go-chi/render" ) type OrganizationHandler struct { @@ -20,21 +25,47 @@ func NewOrganizationHandler(service *service.OrganizationService) *OrganizationH } } +// RevokeCertificateBySerialRequest represents the request to revoke a certificate by serial number +type RevokeCertificateBySerialRequest struct { + SerialNumber string `json:"serialNumber"` // Hex string of the serial number + RevocationReason int `json:"revocationReason"` +} + +// RevokeCertificateByPEMRequest represents the request to revoke a certificate by PEM data +type RevokeCertificateByPEMRequest struct { + Certificate string `json:"certificate"` // PEM encoded certificate + RevocationReason int `json:"revocationReason"` +} + +// DeleteRevokedCertificateRequest represents the request to delete a revoked certificate by serial number +type DeleteRevokedCertificateRequest struct { + SerialNumber string `json:"serialNumber"` // Hex string of the serial number +} + // RegisterRoutes registers the organization routes func (h *OrganizationHandler) RegisterRoutes(r chi.Router) { r.Route("/organizations", func(r chi.Router) { - r.Post("/", h.CreateOrganization) - r.Get("/", h.ListOrganizations) - r.Get("/by-mspid/{mspid}", 
h.GetOrganizationByMspID) - r.Get("/{id}", h.GetOrganization) - r.Put("/{id}", h.UpdateOrganization) - r.Delete("/{id}", h.DeleteOrganization) + r.Post("/", response.Middleware(h.CreateOrganization)) + r.Get("/", response.Middleware(h.ListOrganizations)) + r.Get("/by-mspid/{mspid}", response.Middleware(h.GetOrganizationByMspID)) + r.Get("/{id}", response.Middleware(h.GetOrganization)) + r.Put("/{id}", response.Middleware(h.UpdateOrganization)) + r.Delete("/{id}", response.Middleware(h.DeleteOrganization)) + + // Add CRL-related routes + r.Route("/{id}/crl", func(r chi.Router) { + r.Post("/revoke/serial", response.Middleware(h.RevokeCertificateBySerial)) + r.Post("/revoke/pem", response.Middleware(h.RevokeCertificateByPEM)) + r.Delete("/revoke/serial", response.Middleware(h.DeleteRevokedCertificate)) + r.Get("/", response.Middleware(h.GetCRL)) + }) + r.Get("/{id}/revoked-certificates", response.Middleware(h.GetRevokedCertificates)) }) } // @Summary Create a new Fabric organization // @Description Create a new Fabric organization with the specified configuration -// @Tags organizations +// @Tags Organizations // @Accept json // @Produce json // @Param request body CreateOrganizationRequest true "Organization creation request" @@ -42,12 +73,13 @@ func (h *OrganizationHandler) RegisterRoutes(r chi.Router) { // @Failure 400 {object} map[string]string // @Failure 500 {object} map[string]string // @Router /organizations [post] -func (h *OrganizationHandler) CreateOrganization(w http.ResponseWriter, r *http.Request) { +func (h *OrganizationHandler) CreateOrganization(w http.ResponseWriter, r *http.Request) error { var req CreateOrganizationRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - render.Status(r, http.StatusBadRequest) - render.JSON(w, r, map[string]string{"error": "Invalid request body"}) - return + return errors.NewValidationError("invalid request body", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_REQUEST_BODY", + }) 
} params := service.CreateOrganizationParams{ @@ -59,18 +91,15 @@ func (h *OrganizationHandler) CreateOrganization(w http.ResponseWriter, r *http. org, err := h.service.CreateOrganization(r.Context(), params) if err != nil { - render.Status(r, http.StatusInternalServerError) - render.JSON(w, r, map[string]string{"error": err.Error()}) - return + return errors.NewInternalError("failed to create organization", err, nil) } - render.Status(r, http.StatusCreated) - render.JSON(w, r, toOrganizationResponse(org)) + return response.WriteJSON(w, http.StatusCreated, toOrganizationResponse(org)) } // @Summary Get a Fabric organization // @Description Get a Fabric organization by ID -// @Tags organizations +// @Tags Organizations // @Accept json // @Produce json // @Param id path int true "Organization ID" @@ -78,27 +107,29 @@ func (h *OrganizationHandler) CreateOrganization(w http.ResponseWriter, r *http. // @Failure 404 {object} map[string]string // @Failure 500 {object} map[string]string // @Router /organizations/{id} [get] -func (h *OrganizationHandler) GetOrganization(w http.ResponseWriter, r *http.Request) { +func (h *OrganizationHandler) GetOrganization(w http.ResponseWriter, r *http.Request) error { id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) if err != nil { - render.Status(r, http.StatusBadRequest) - render.JSON(w, r, map[string]string{"error": "Invalid organization ID"}) - return + return errors.NewValidationError("invalid organization ID", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_ID_FORMAT", + }) } org, err := h.service.GetOrganization(r.Context(), id) if err != nil { - render.Status(r, http.StatusNotFound) - render.JSON(w, r, map[string]string{"error": err.Error()}) - return + return errors.NewNotFoundError("organization not found", map[string]interface{}{ + "code": "ORGANIZATION_NOT_FOUND", + "detail": err.Error(), + }) } - render.JSON(w, r, toOrganizationResponse(org)) + return response.WriteJSON(w, http.StatusOK, 
toOrganizationResponse(org)) } // @Summary Get a Fabric organization by MSP ID // @Description Get a Fabric organization by MSP ID -// @Tags organizations +// @Tags Organizations // @Accept json // @Produce json // @Param mspid path string true "MSP ID" @@ -106,27 +137,29 @@ func (h *OrganizationHandler) GetOrganization(w http.ResponseWriter, r *http.Req // @Failure 404 {object} map[string]string // @Failure 500 {object} map[string]string // @Router /organizations/by-mspid/{mspid} [get] -func (h *OrganizationHandler) GetOrganizationByMspID(w http.ResponseWriter, r *http.Request) { +func (h *OrganizationHandler) GetOrganizationByMspID(w http.ResponseWriter, r *http.Request) error { mspid := chi.URLParam(r, "mspid") if mspid == "" { - render.Status(r, http.StatusBadRequest) - render.JSON(w, r, map[string]string{"error": "Invalid MSP ID"}) - return + return errors.NewValidationError("invalid MSP ID", map[string]interface{}{ + "code": "INVALID_MSPID", + "detail": "MSP ID cannot be empty", + }) } org, err := h.service.GetOrganizationByMspID(r.Context(), mspid) if err != nil { - render.Status(r, http.StatusNotFound) - render.JSON(w, r, map[string]string{"error": err.Error()}) - return + return errors.NewNotFoundError("organization not found", map[string]interface{}{ + "code": "ORGANIZATION_NOT_FOUND", + "detail": err.Error(), + }) } - render.JSON(w, r, toOrganizationResponse(org)) + return response.WriteJSON(w, http.StatusOK, toOrganizationResponse(org)) } // @Summary Update a Fabric organization // @Description Update an existing Fabric organization -// @Tags organizations +// @Tags Organizations // @Accept json // @Produce json // @Param id path int true "Organization ID" @@ -136,36 +169,36 @@ func (h *OrganizationHandler) GetOrganizationByMspID(w http.ResponseWriter, r *h // @Failure 404 {object} map[string]string // @Failure 500 {object} map[string]string // @Router /organizations/{id} [put] -func (h *OrganizationHandler) UpdateOrganization(w http.ResponseWriter, r 
*http.Request) { +func (h *OrganizationHandler) UpdateOrganization(w http.ResponseWriter, r *http.Request) error { id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) if err != nil { - render.Status(r, http.StatusBadRequest) - render.JSON(w, r, map[string]string{"error": "Invalid organization ID"}) - return + return errors.NewValidationError("invalid organization ID", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_ID_FORMAT", + }) } var req UpdateOrganizationRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - render.Status(r, http.StatusBadRequest) - render.JSON(w, r, map[string]string{"error": "Invalid request body"}) - return + return errors.NewValidationError("invalid request body", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_REQUEST_BODY", + }) } org, err := h.service.UpdateOrganization(r.Context(), id, service.UpdateOrganizationParams{ Description: req.Description, }) if err != nil { - render.Status(r, http.StatusInternalServerError) - render.JSON(w, r, map[string]string{"error": err.Error()}) - return + return errors.NewInternalError("failed to update organization", err, nil) } - render.JSON(w, r, toOrganizationResponse(org)) + return response.WriteJSON(w, http.StatusOK, toOrganizationResponse(org)) } // @Summary Delete a Fabric organization // @Description Delete a Fabric organization by ID -// @Tags organizations +// @Tags Organizations // @Accept json // @Produce json // @Param id path int true "Organization ID" @@ -173,43 +206,259 @@ func (h *OrganizationHandler) UpdateOrganization(w http.ResponseWriter, r *http. 
// @Failure 404 {object} map[string]string // @Failure 500 {object} map[string]string // @Router /organizations/{id} [delete] -func (h *OrganizationHandler) DeleteOrganization(w http.ResponseWriter, r *http.Request) { +func (h *OrganizationHandler) DeleteOrganization(w http.ResponseWriter, r *http.Request) error { id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) if err != nil { - render.Status(r, http.StatusBadRequest) - render.JSON(w, r, map[string]string{"error": "Invalid organization ID"}) - return + return errors.NewValidationError("invalid organization ID", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_ID_FORMAT", + }) } if err := h.service.DeleteOrganization(r.Context(), id); err != nil { - render.Status(r, http.StatusInternalServerError) - render.JSON(w, r, map[string]string{"error": err.Error()}) - return + return errors.NewInternalError("failed to delete organization", err, nil) } - render.Status(r, http.StatusNoContent) + return response.WriteJSON(w, http.StatusNoContent, nil) } // @Summary List all Fabric organizations // @Description Get a list of all Fabric organizations -// @Tags organizations +// @Tags Organizations // @Accept json // @Produce json // @Success 200 {array} OrganizationResponse // @Failure 500 {object} map[string]string // @Router /organizations [get] -func (h *OrganizationHandler) ListOrganizations(w http.ResponseWriter, r *http.Request) { +func (h *OrganizationHandler) ListOrganizations(w http.ResponseWriter, r *http.Request) error { orgs, err := h.service.ListOrganizations(r.Context()) if err != nil { - render.Status(r, http.StatusInternalServerError) - render.JSON(w, r, map[string]string{"error": err.Error()}) - return + return errors.NewInternalError("failed to list organizations", err, nil) } - response := make([]*OrganizationResponse, len(orgs)) + orgResponses := make([]*OrganizationResponse, len(orgs)) for i, org := range orgs { - response[i] = toOrganizationResponse(&org) + orgResponses[i] = 
toOrganizationResponse(&org) + } + + return response.WriteJSON(w, http.StatusOK, orgResponses) +} + +// @Summary Revoke a certificate using its serial number +// @Description Add a certificate to the organization's CRL using its serial number +// @Tags Organizations +// @Accept json +// @Produce json +// @Param id path int true "Organization ID" +// @Param request body RevokeCertificateBySerialRequest true "Certificate revocation request" +// @Success 200 {object} map[string]string +// @Failure 400 {object} map[string]string +// @Failure 500 {object} map[string]string +// @Router /organizations/{id}/crl/revoke/serial [post] +func (h *OrganizationHandler) RevokeCertificateBySerial(w http.ResponseWriter, r *http.Request) error { + id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + return errors.NewValidationError("invalid organization ID", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_ID_FORMAT", + }) + } + + var req RevokeCertificateBySerialRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return errors.NewValidationError("invalid request body", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_REQUEST_BODY", + }) + } + + serialNumber, ok := new(big.Int).SetString(req.SerialNumber, 16) + if !ok { + return errors.NewValidationError("invalid serial number format", map[string]interface{}{ + "code": "INVALID_SERIAL_NUMBER_FORMAT", + "detail": "Invalid serial number format", + }) + } + + err = h.service.RevokeCertificate(r.Context(), id, serialNumber, req.RevocationReason) + if err != nil { + return errors.NewInternalError("failed to revoke certificate", err, nil) + } + + return response.WriteJSON(w, http.StatusOK, map[string]string{"message": "Certificate revoked successfully"}) +} + +// @Summary Revoke a certificate using PEM data +// @Description Add a certificate to the organization's CRL using its PEM encoded data +// @Tags Organizations +// @Accept json +// @Produce json +// 
@Param id path int true "Organization ID" +// @Param request body RevokeCertificateByPEMRequest true "Certificate revocation request" +// @Success 200 {object} map[string]string +// @Failure 400 {object} map[string]string +// @Failure 500 {object} map[string]string +// @Router /organizations/{id}/crl/revoke/pem [post] +func (h *OrganizationHandler) RevokeCertificateByPEM(w http.ResponseWriter, r *http.Request) error { + id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + return errors.NewValidationError("invalid organization ID", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_ID_FORMAT", + }) + } + + var req RevokeCertificateByPEMRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return errors.NewValidationError("invalid request body", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_REQUEST_BODY", + }) + } + + block, _ := pem.Decode([]byte(req.Certificate)) + if block == nil || block.Type != "CERTIFICATE" { + return errors.NewValidationError("invalid certificate PEM data", map[string]interface{}{ + "code": "INVALID_CERTIFICATE_PEM_DATA", + "detail": "Invalid certificate PEM data", + }) + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return errors.NewValidationError("failed to parse certificate", map[string]interface{}{ + "detail": err.Error(), + "code": "FAILED_TO_PARSE_CERTIFICATE", + }) + } + + err = h.service.RevokeCertificate(r.Context(), id, cert.SerialNumber, req.RevocationReason) + if err != nil { + return errors.NewInternalError("failed to revoke certificate", err, nil) + } + + return response.WriteJSON(w, http.StatusOK, map[string]string{ + "message": "Certificate revoked successfully", + "serialNumber": cert.SerialNumber.Text(16), + }) +} + +// @Summary Get organization's CRL +// @Description Get the current Certificate Revocation List for the organization +// @Tags Organizations +// @Accept json +// @Produce application/x-pem-file +// 
@Param id path int true "Organization ID" +// @Success 200 {string} string "PEM encoded CRL" +// @Failure 400 {object} map[string]string +// @Failure 500 {object} map[string]string +// @Router /organizations/{id}/crl [get] +func (h *OrganizationHandler) GetCRL(w http.ResponseWriter, r *http.Request) error { + id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + return errors.NewValidationError("invalid organization ID", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_ID_FORMAT", + }) + } + + crlBytes, err := h.service.GetCRL(r.Context(), id) + if err != nil { + return errors.NewInternalError("failed to get CRL", err, nil) + } + + w.Header().Set("Content-Type", "application/x-pem-file") + w.Header().Set("Content-Disposition", "attachment; filename=crl.pem") + _, err = w.Write(crlBytes) + if err != nil { + return errors.NewInternalError("failed to write response", err, nil) } - render.JSON(w, r, response) + return nil +} + +// @Summary Get organization's revoked certificates +// @Description Get all revoked certificates for the organization +// @Tags Organizations +// @Accept json +// @Produce json +// @Param id path int true "Organization ID" +// @Success 200 {array} RevokedCertificateResponse +// @Failure 400 {object} map[string]string +// @Failure 500 {object} map[string]string +// @Router /organizations/{id}/revoked-certificates [get] +func (h *OrganizationHandler) GetRevokedCertificates(w http.ResponseWriter, r *http.Request) error { + id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + return errors.NewInternalError("failed to parse organization ID", err, nil) + } + + certs, err := h.service.GetRevokedCertificates(r.Context(), id) + if err != nil { + return errors.NewInternalError("failed to get revoked certificates", err, nil) + } + + certsResponse := make([]RevokedCertificateResponse, len(certs)) + for i, cert := range certs { + certsResponse[i] = RevokedCertificateResponse{ + 
SerialNumber: cert.SerialNumber, + RevocationTime: cert.RevocationTime, + Reason: cert.Reason, + } + } + + return response.WriteJSON(w, http.StatusOK, certsResponse) +} + +// @Summary Delete a revoked certificate using its serial number +// @Description Remove a certificate from the organization's CRL using its serial number +// @Tags Organizations +// @Accept json +// @Produce json +// @Param id path int true "Organization ID" +// @Param request body DeleteRevokedCertificateRequest true "Certificate deletion request" +// @Success 200 {object} map[string]string +// @Failure 400 {object} map[string]string +// @Failure 404 {object} map[string]string +// @Failure 500 {object} map[string]string +// @Router /organizations/{id}/crl/revoke/serial [delete] +func (h *OrganizationHandler) DeleteRevokedCertificate(w http.ResponseWriter, r *http.Request) error { + id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + return errors.NewValidationError("invalid organization ID", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_ID_FORMAT", + }) + } + + var req DeleteRevokedCertificateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return errors.NewValidationError("invalid request body", map[string]interface{}{ + "detail": err.Error(), + "code": "INVALID_REQUEST_BODY", + }) + } + + err = h.service.DeleteRevokedCertificate(r.Context(), id, req.SerialNumber) + if err != nil { + // Check if it's a not found error from the service + if errors.IsType(err, errors.NotFoundError) { + return errors.NewNotFoundError("certificate not found", map[string]interface{}{ + "code": "CERTIFICATE_NOT_FOUND", + "detail": "The specified certificate was not found in the revocation list", + }) + } + return errors.NewInternalError("failed to delete revoked certificate", err, nil) + } + + return response.WriteJSON(w, http.StatusOK, map[string]string{ + "message": "Certificate successfully removed from revocation list", + }) +} + +// 
RevokedCertificateResponse represents the response for a revoked certificate +type RevokedCertificateResponse struct { + SerialNumber string `json:"serialNumber"` + RevocationTime time.Time `json:"revocationTime"` + Reason int64 `json:"reason"` } diff --git a/pkg/fabric/networkconfig/parser.go b/pkg/fabric/networkconfig/parser.go new file mode 100644 index 0000000..14e80a1 --- /dev/null +++ b/pkg/fabric/networkconfig/parser.go @@ -0,0 +1,58 @@ +package networkconfig + +import ( + "io" + "os" + + "gopkg.in/yaml.v3" +) + +// LoadFromFile loads a network configuration from a YAML file +func LoadFromFile(path string) (*NetworkConfig, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + return LoadFromReader(file) +} + +// LoadFromReader loads a network configuration from an io.Reader +func LoadFromReader(reader io.Reader) (*NetworkConfig, error) { + var config NetworkConfig + decoder := yaml.NewDecoder(reader) + if err := decoder.Decode(&config); err != nil { + return nil, err + } + return &config, nil +} + +// LoadFromBytes loads a network configuration from a byte slice +func LoadFromBytes(data []byte) (*NetworkConfig, error) { + var config NetworkConfig + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, err + } + return &config, nil +} + +// SaveToFile saves a network configuration to a YAML file +func (c *NetworkConfig) SaveToFile(path string) error { + data, err := yaml.Marshal(c) + if err != nil { + return err + } + return os.WriteFile(path, data, 0644) +} + +// SaveToWriter saves a network configuration to an io.Writer +func (c *NetworkConfig) SaveToWriter(writer io.Writer) error { + encoder := yaml.NewEncoder(writer) + return encoder.Encode(c) +} + +// SaveToBytes converts a network configuration to a byte slice +func (c *NetworkConfig) SaveToBytes() ([]byte, error) { + return yaml.Marshal(c) +} diff --git a/pkg/fabric/networkconfig/parser_test.go b/pkg/fabric/networkconfig/parser_test.go new 
file mode 100644 index 0000000..b81c87f --- /dev/null +++ b/pkg/fabric/networkconfig/parser_test.go @@ -0,0 +1,105 @@ +package networkconfig + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLoadFromFile(t *testing.T) { + // Create a temporary test file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test-config.yaml") + + // Write test YAML content + testYAML := ` +name: test-network +version: "1.0" +client: + organization: Org1 +organizations: + Org1: + mspid: Org1MSP + cryptoPath: /tmp/crypto + users: {} + peers: [] + orderers: [] +orderers: {} +peers: {} +certificateAuthorities: {} +channels: {} +` + err := os.WriteFile(testFile, []byte(testYAML), 0644) + assert.NoError(t, err) + + // Test loading from file + config, err := LoadFromFile(testFile) + assert.NoError(t, err) + assert.NotNil(t, config) + assert.Equal(t, "test-network", config.Name) + assert.Equal(t, "1.0", config.Version) + assert.Equal(t, "Org1", config.Client.Organization) +} + +func TestLoadFromBytes(t *testing.T) { + testYAML := ` +name: test-network +version: "1.0" +client: + organization: Org1 +organizations: + Org1: + mspid: Org1MSP + cryptoPath: /tmp/crypto + users: {} + peers: [] + orderers: [] +orderers: {} +peers: {} +certificateAuthorities: {} +channels: {} +` + + config, err := LoadFromBytes([]byte(testYAML)) + assert.NoError(t, err) + assert.NotNil(t, config) + assert.Equal(t, "test-network", config.Name) + assert.Equal(t, "1.0", config.Version) + assert.Equal(t, "Org1", config.Client.Organization) +} + +func TestSaveToFile(t *testing.T) { + // Create a test configuration + config := &NetworkConfig{ + Name: "test-network", + Version: "1.0", + Client: ClientConfig{ + Organization: "Org1", + }, + Organizations: make(map[string]Organization), + Orderers: make(map[string]Orderer), + Peers: make(map[string]Peer), + CertificateAuthorities: make(map[string]CertificateAuthority), + Channels: make(map[string]Channel), + } + + 
// Save to a temporary file + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test-save.yaml") + err := config.SaveToFile(testFile) + assert.NoError(t, err) + + // Verify the file exists + _, err = os.Stat(testFile) + assert.NoError(t, err) + + // Load the saved file and verify contents + loadedConfig, err := LoadFromFile(testFile) + assert.NoError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, config.Name, loadedConfig.Name) + assert.Equal(t, config.Version, loadedConfig.Version) + assert.Equal(t, config.Client.Organization, loadedConfig.Client.Organization) +} diff --git a/pkg/fabric/networkconfig/types.go b/pkg/fabric/networkconfig/types.go new file mode 100644 index 0000000..1d3e6d6 --- /dev/null +++ b/pkg/fabric/networkconfig/types.go @@ -0,0 +1,98 @@ +package networkconfig + +// NetworkConfig represents the root structure of the network configuration +type NetworkConfig struct { + Name string `yaml:"name"` + Version string `yaml:"version"` + Client ClientConfig `yaml:"client"` + Organizations map[string]Organization `yaml:"organizations"` + Orderers map[string]Orderer `yaml:"orderers"` + Peers map[string]Peer `yaml:"peers"` + CertificateAuthorities map[string]CertificateAuthority `yaml:"certificateAuthorities"` + Channels map[string]Channel `yaml:"channels"` +} + +// ClientConfig represents the client configuration +type ClientConfig struct { + Organization string `yaml:"organization"` +} + +// Organization represents an organization in the network +type Organization struct { + MSPID string `yaml:"mspid"` + CryptoPath string `yaml:"cryptoPath"` + Users map[string]User `yaml:"users"` + Peers []string `yaml:"peers"` + Orderers []string `yaml:"orderers"` +} + +// User represents a user in an organization +type User struct { + Cert UserCert `yaml:"cert"` + Key UserKey `yaml:"key"` +} + +// UserCert represents a user's certificate +type UserCert struct { + PEM string `yaml:"pem"` +} + +// UserKey represents a user's private key +type 
UserKey struct { + PEM string `yaml:"pem"` +} + +// Orderer represents an orderer node +type Orderer struct { + URL string `yaml:"url"` + AdminURL string `yaml:"adminUrl"` + AdminTLSCert string `yaml:"adminTlsCert"` + GRPCOptions GRPCOptions `yaml:"grpcOptions"` + TLSCACerts TLSCACerts `yaml:"tlsCACerts"` +} + +// Peer represents a peer node +type Peer struct { + URL string `yaml:"url"` + GRPCOptions GRPCOptions `yaml:"grpcOptions"` + TLSCACerts TLSCACerts `yaml:"tlsCACerts"` +} + +// GRPCOptions represents gRPC options +type GRPCOptions struct { + AllowInsecure bool `yaml:"allow-insecure"` +} + +// TLSCACerts represents TLS CA certificates +type TLSCACerts struct { + PEM string `yaml:"pem"` +} + +// CertificateAuthority represents a CA server +type CertificateAuthority struct { + URL string `yaml:"url"` + Registrar Registrar `yaml:"registrar"` + CAName string `yaml:"caName"` + TLSCACerts []TLSCACerts `yaml:"tlsCACerts"` +} + +// Registrar represents CA registrar information +type Registrar struct { + EnrollID string `yaml:"enrollId"` + EnrollSecret string `yaml:"enrollSecret"` +} + +// Channel represents a channel configuration +type Channel struct { + Orderers []string `yaml:"orderers"` + Peers map[string]PeerConfig `yaml:"peers"` +} + +// PeerConfig represents peer configuration within a channel +type PeerConfig struct { + Discover bool `yaml:"discover"` + EndorsingPeer bool `yaml:"endorsingPeer"` + ChaincodeQuery bool `yaml:"chaincodeQuery"` + LedgerQuery bool `yaml:"ledgerQuery"` + EventSource bool `yaml:"eventSource"` +} diff --git a/pkg/fabric/policydsl/policydsl.go b/pkg/fabric/policydsl/policydsl.go new file mode 100644 index 0000000..a22c09a --- /dev/null +++ b/pkg/fabric/policydsl/policydsl.go @@ -0,0 +1,384 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package policydsl + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/Knetic/govaluate" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +// Gate values +const ( + GateAnd = "And" + GateOr = "Or" + GateOutOf = "OutOf" +) + +// Role values for principals +const ( + RoleAdmin = "admin" + RoleMember = "member" + RoleClient = "client" + RolePeer = "peer" + RoleOrderer = "orderer" +) + +var ( + regex = regexp.MustCompile( + fmt.Sprintf("^([[:alnum:].-]+)([.])(%s|%s|%s|%s|%s)$", + RoleAdmin, RoleMember, RoleClient, RolePeer, RoleOrderer), + ) + regexErr = regexp.MustCompile("^No parameter '([^']+)' found[.]$") +) + +// SignedBy creates a SignaturePolicy requiring a given signer's signature +func SignedBy(index int32) *cb.SignaturePolicy { + return &cb.SignaturePolicy{ + Type: &cb.SignaturePolicy_SignedBy{ + SignedBy: index, + }, + } +} + +// And is a convenience method which utilizes NOutOf to produce And equivalent behavior +func And(lhs, rhs *cb.SignaturePolicy) *cb.SignaturePolicy { + return NOutOf(2, []*cb.SignaturePolicy{lhs, rhs}) +} + +// Or is a convenience method which utilizes NOutOf to produce Or equivalent behavior +func Or(lhs, rhs *cb.SignaturePolicy) *cb.SignaturePolicy { + return NOutOf(1, []*cb.SignaturePolicy{lhs, rhs}) +} + +// NOutOf creates a policy which requires N out of the slice of policies to evaluate to true +func NOutOf(n int32, policies []*cb.SignaturePolicy) *cb.SignaturePolicy { + return &cb.SignaturePolicy{ + Type: &cb.SignaturePolicy_NOutOf_{ + NOutOf: &cb.SignaturePolicy_NOutOf{ + N: n, + Rules: policies, + }, + }, + } +} + +// a stub function - it returns the same string as it's passed. 
+// This will be evaluated by second/third passes to convert to a proto policy +func outof(args ...interface{}) (interface{}, error) { + toret := "outof(" + + if len(args) < 2 { + return nil, fmt.Errorf("expected at least two arguments to NOutOf. Given %d", len(args)) + } + + arg0 := args[0] + // govaluate treats all numbers as float64 only. But and/or may pass int/string. Allowing int/string for flexibility of caller + if n, ok := arg0.(float64); ok { + toret += strconv.Itoa(int(n)) + } else if n, ok := arg0.(int); ok { + toret += strconv.Itoa(n) + } else if n, ok := arg0.(string); ok { + toret += n + } else { + return nil, fmt.Errorf("unexpected type %s", reflect.TypeOf(arg0)) + } + + for _, arg := range args[1:] { + toret += ", " + + switch t := arg.(type) { + case string: + if regex.MatchString(t) { + toret += "'" + t + "'" + } else { + toret += t + } + default: + return nil, fmt.Errorf("unexpected type %s", reflect.TypeOf(arg)) + } + } + + return toret + ")", nil +} + +func and(args ...interface{}) (interface{}, error) { + args = append([]interface{}{len(args)}, args...) + return outof(args...) +} + +func or(args ...interface{}) (interface{}, error) { + args = append([]interface{}{1}, args...) + return outof(args...) 
+} + +func firstPass(args ...interface{}) (interface{}, error) { + toret := "outof(ID" + for _, arg := range args { + toret += ", " + + switch t := arg.(type) { + case string: + if regex.MatchString(t) { + toret += "'" + t + "'" + } else { + toret += t + } + case float32: + case float64: + toret += strconv.Itoa(int(t)) + default: + return nil, fmt.Errorf("unexpected type %s", reflect.TypeOf(arg)) + } + } + + return toret + ")", nil +} + +func secondPass(args ...interface{}) (interface{}, error) { + /* general sanity check, we expect at least 3 args */ + if len(args) < 3 { + return nil, fmt.Errorf("at least 3 arguments expected, got %d", len(args)) + } + + /* get the first argument, we expect it to be the context */ + var ctx *context + switch v := args[0].(type) { + case *context: + ctx = v + default: + return nil, fmt.Errorf("unrecognized type, expected the context, got %s", reflect.TypeOf(args[0])) + } + + /* get the second argument, we expect an integer telling us + how many of the remaining we expect to have*/ + var t int + switch arg := args[1].(type) { + case float64: + t = int(arg) + default: + return nil, fmt.Errorf("unrecognized type, expected a number, got %s", reflect.TypeOf(args[1])) + } + + /* get the n in the t out of n */ + n := len(args) - 2 + + /* sanity check - t should be positive, permit equal to n+1, but disallow over n+1 */ + if t < 0 || t > n+1 { + return nil, fmt.Errorf("invalid t-out-of-n predicate, t %d, n %d", t, n) + } + + policies := make([]*cb.SignaturePolicy, 0) + + /* handle the rest of the arguments */ + for _, principal := range args[2:] { + switch t := principal.(type) { + /* if it's a string, we expect it to be formed as + . 
, where MSP_ID is the MSP identifier + and ROLE is either a member, an admin, a client, a peer or an orderer*/ + case string: + /* split the string */ + subm := regex.FindAllStringSubmatch(t, -1) + if subm == nil || len(subm) != 1 || len(subm[0]) != 4 { + return nil, fmt.Errorf("error parsing principal %s", t) + } + + /* get the right role */ + var r mb.MSPRole_MSPRoleType + + switch subm[0][3] { + case RoleMember: + r = mb.MSPRole_MEMBER + case RoleAdmin: + r = mb.MSPRole_ADMIN + case RoleClient: + r = mb.MSPRole_CLIENT + case RolePeer: + r = mb.MSPRole_PEER + case RoleOrderer: + r = mb.MSPRole_ORDERER + default: + return nil, fmt.Errorf("error parsing role %s", t) + } + + /* build the principal we've been told */ + mspRole, err := proto.Marshal(&mb.MSPRole{MspIdentifier: subm[0][1], Role: r}) + if err != nil { + return nil, fmt.Errorf("error marshalling msp role: %s", err) + } + + p := &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: mspRole, + } + ctx.principals = append(ctx.principals, p) + + /* create a SignaturePolicy that requires a signature from + the principal we've just built*/ + dapolicy := SignedBy(int32(ctx.IDNum)) + policies = append(policies, dapolicy) + + /* increment the identity counter. Note that this is + suboptimal as we are not reusing identities. We + can deduplicate them easily and make this puppy + smaller. 
For now it's fine though */ + // TODO: deduplicate principals + ctx.IDNum++ + + /* if we've already got a policy we're good, just append it */ + case *cb.SignaturePolicy: + policies = append(policies, t) + + default: + return nil, fmt.Errorf("unrecognized type, expected a principal or a policy, got %s", reflect.TypeOf(principal)) + } + } + + return NOutOf(int32(t), policies), nil +} + +type context struct { + IDNum int + principals []*mb.MSPPrincipal +} + +func newContext() *context { + return &context{IDNum: 0, principals: make([]*mb.MSPPrincipal, 0)} +} + +// FromString takes a string representation of the policy, +// parses it and returns a SignaturePolicyEnvelope that +// implements that policy. The supported language is as follows: +// +// GATE(P[, P]) +// +// where: +// - GATE is either "and" or "or" +// - P is either a principal or another nested call to GATE +// +// A principal is defined as: +// +// # ORG.ROLE +// +// where: +// - ORG is a string (representing the MSP identifier) +// - ROLE takes the value of any of the RoleXXX constants representing +// the required role +func FromString(policy string) (*cb.SignaturePolicyEnvelope, error) { + // first we translate the and/or business into outof gates + intermediate, err := govaluate.NewEvaluableExpressionWithFunctions( + policy, map[string]govaluate.ExpressionFunction{ + GateAnd: and, + strings.ToLower(GateAnd): and, + strings.ToUpper(GateAnd): and, + GateOr: or, + strings.ToLower(GateOr): or, + strings.ToUpper(GateOr): or, + GateOutOf: outof, + strings.ToLower(GateOutOf): outof, + strings.ToUpper(GateOutOf): outof, + }, + ) + if err != nil { + return nil, err + } + + intermediateRes, err := intermediate.Evaluate(map[string]interface{}{}) + if err != nil { + // attempt to produce a meaningful error + if regexErr.MatchString(err.Error()) { + sm := regexErr.FindStringSubmatch(err.Error()) + if len(sm) == 2 { + return nil, fmt.Errorf("unrecognized token '%s' in policy string", sm[1]) + } + } + + return nil, 
err + } + + resStr, ok := intermediateRes.(string) + if !ok { + return nil, fmt.Errorf("invalid policy string '%s'", policy) + } + + // we still need two passes. The first pass just adds an extra + // argument ID to each of the outof calls. This is + // required because govaluate has no means of giving context + // to user-implemented functions other than via arguments. + // We need this argument because we need a global place where + // we put the identities that the policy requires + exp, err := govaluate.NewEvaluableExpressionWithFunctions( + resStr, + map[string]govaluate.ExpressionFunction{"outof": firstPass}, + ) + if err != nil { + return nil, err + } + + res, err := exp.Evaluate(map[string]interface{}{}) + if err != nil { + // attempt to produce a meaningful error + if regexErr.MatchString(err.Error()) { + sm := regexErr.FindStringSubmatch(err.Error()) + if len(sm) == 2 { + return nil, fmt.Errorf("unrecognized token '%s' in policy string", sm[1]) + } + } + + return nil, err + } + + resStr, ok = res.(string) + if !ok { + return nil, fmt.Errorf("invalid policy string '%s'", policy) + } + + ctx := newContext() + parameters := make(map[string]interface{}, 1) + parameters["ID"] = ctx + + exp, err = govaluate.NewEvaluableExpressionWithFunctions( + resStr, + map[string]govaluate.ExpressionFunction{"outof": secondPass}, + ) + if err != nil { + return nil, err + } + + res, err = exp.Evaluate(parameters) + if err != nil { + // attempt to produce a meaningful error + if regexErr.MatchString(err.Error()) { + sm := regexErr.FindStringSubmatch(err.Error()) + if len(sm) == 2 { + return nil, fmt.Errorf("unrecognized token '%s' in policy string", sm[1]) + } + } + + return nil, err + } + + rule, ok := res.(*cb.SignaturePolicy) + if !ok { + return nil, fmt.Errorf("invalid policy string '%s'", policy) + } + + p := &cb.SignaturePolicyEnvelope{ + Identities: ctx.principals, + Version: 0, + Rule: rule, + } + + return p, nil +} diff --git 
a/pkg/fabric/service/organization_service.go b/pkg/fabric/service/organization_service.go index 9b98649..e61b8c1 100644 --- a/pkg/fabric/service/organization_service.go +++ b/pkg/fabric/service/organization_service.go @@ -2,17 +2,26 @@ package service import ( "context" + "crypto" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" "database/sql" + "encoding/asn1" + "encoding/pem" "errors" "fmt" + "math/big" "os" "path/filepath" "strings" "time" + "github.com/chainlaunch/chainlaunch/pkg/config" "github.com/chainlaunch/chainlaunch/pkg/db" "github.com/chainlaunch/chainlaunch/pkg/keymanagement/models" keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service" + gwidentity "github.com/hyperledger/fabric-gateway/pkg/identity" ) // OrganizationDTO represents the service layer data structure @@ -48,19 +57,28 @@ type UpdateOrganizationParams struct { Description *string } +// RevokedCertificateDTO represents a revoked certificate +type RevokedCertificateDTO struct { + SerialNumber string `json:"serialNumber"` + RevocationTime time.Time `json:"revocationTime"` + Reason int64 `json:"reason"` +} + type OrganizationService struct { queries *db.Queries keyManagement *keymanagement.KeyManagementService + configService *config.ConfigService } -func NewOrganizationService(queries *db.Queries, keyManagement *keymanagement.KeyManagementService) *OrganizationService { +func NewOrganizationService(queries *db.Queries, keyManagement *keymanagement.KeyManagementService, configService *config.ConfigService) *OrganizationService { return &OrganizationService{ queries: queries, keyManagement: keyManagement, + configService: configService, } } -func mapDBOrganizationToServiceOrganization(org db.GetFabricOrganizationByMspIDRow) *OrganizationDTO { +func mapDBOrganizationToServiceOrganization(org *db.GetFabricOrganizationByMspIDRow) *OrganizationDTO { providerName := "" if org.ProviderName.Valid { providerName = org.ProviderName.String @@ -87,7 +105,7 @@ func 
mapDBOrganizationToServiceOrganization(org db.GetFabricOrganizationByMspIDR } // Convert database model to DTO for single organization -func toOrganizationDTO(org db.GetFabricOrganizationWithKeysRow) *OrganizationDTO { +func toOrganizationDTO(org *db.GetFabricOrganizationWithKeysRow) *OrganizationDTO { providerName := "" if org.ProviderName.Valid { providerName = org.ProviderName.String @@ -114,7 +132,7 @@ func toOrganizationDTO(org db.GetFabricOrganizationWithKeysRow) *OrganizationDTO } // Convert database model to DTO for list of organizations -func toOrganizationListDTO(org db.ListFabricOrganizationsWithKeysRow) *OrganizationDTO { +func toOrganizationListDTO(org *db.ListFabricOrganizationsWithKeysRow) *OrganizationDTO { providerName := "" if org.ProviderName.Valid { providerName = org.ProviderName.String @@ -296,7 +314,7 @@ func (s *OrganizationService) CreateOrganization(ctx context.Context, params Cre } // Create organization - org, err := s.queries.CreateFabricOrganization(ctx, db.CreateFabricOrganizationParams{ + org, err := s.queries.CreateFabricOrganization(ctx, &db.CreateFabricOrganizationParams{ MspID: params.MspID, Description: sql.NullString{String: params.Description, Valid: params.Description != ""}, ProviderID: sql.NullInt64{Int64: params.ProviderID, Valid: true}, @@ -364,7 +382,7 @@ func (s *OrganizationService) UpdateOrganization(ctx context.Context, id int64, } // Update organization - _, err = s.queries.UpdateFabricOrganization(ctx, db.UpdateFabricOrganizationParams{ + _, err = s.queries.UpdateFabricOrganization(ctx, &db.UpdateFabricOrganizationParams{ ID: id, Description: org.Description, }) @@ -403,12 +421,8 @@ func (s *OrganizationService) DeleteOrganization(ctx context.Context, id int64) // Delete the organization directory // Convert MspID to lowercase for the directory name mspIDLower := strings.ToLower(org.MspID) - homeDir, err := os.UserHomeDir() - if err != nil { - return fmt.Errorf("failed to get user home directory: %w", err) - } - 
orgDir := filepath.Join(homeDir, ".chainlaunch", "orgs", mspIDLower) + orgDir := filepath.Join(s.configService.GetDataPath(), "orgs", mspIDLower) err = os.RemoveAll(orgDir) if err != nil { // Log the error but don't fail the operation @@ -431,3 +445,207 @@ func (s *OrganizationService) ListOrganizations(ctx context.Context) ([]Organiza } return dtos, nil } + +// GetCRL returns the current CRL for the organization in PEM format +func (s *OrganizationService) GetCRL(ctx context.Context, orgID int64) ([]byte, error) { + // Get organization details + org, err := s.queries.GetFabricOrganizationWithKeys(ctx, orgID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("organization not found") + } + return nil, fmt.Errorf("failed to get organization: %w", err) + } + + // Get all revoked certificates for this organization + revokedCerts, err := s.queries.GetRevokedCertificates(ctx, orgID) + if err != nil { + return nil, fmt.Errorf("failed to get revoked certificates: %w", err) + } + + // Get the admin signing key for signing the CRL + adminSignKey, err := s.keyManagement.GetKey(ctx, int(org.SignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to get admin sign key: %w", err) + } + + // Parse the certificate + cert, err := gwidentity.CertificateFromPEM([]byte(*adminSignKey.Certificate)) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %w", err) + } + + // Get private key from key management service + privateKeyPEM, err := s.keyManagement.GetDecryptedPrivateKey(int(org.SignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to get private key: %w", err) + } + + // Parse the private key + priv, err := gwidentity.PrivateKeyFromPEM([]byte(privateKeyPEM)) + if err != nil { + return nil, fmt.Errorf("failed to parse private key: %w", err) + } + + // Cast private key to crypto.Signer + signer, ok := priv.(crypto.Signer) + if !ok { + return nil, fmt.Errorf("private key does not implement crypto.Signer") 
+ } + + // Create CRL + now := time.Now() + crl := &x509.RevocationList{ + Number: big.NewInt(1), + ThisUpdate: now, + NextUpdate: now.AddDate(0, 0, 7), // Valid for 7 days + } + + // Add all revoked certificates + for _, rc := range revokedCerts { + serialNumber, ok := new(big.Int).SetString(rc.SerialNumber, 16) + if !ok { + return nil, fmt.Errorf("invalid serial number format: %s", rc.SerialNumber) + } + + revokedCert := pkix.RevokedCertificate{ + SerialNumber: serialNumber, + RevocationTime: rc.RevocationTime, + Extensions: []pkix.Extension{ + { + Id: asn1.ObjectIdentifier{2, 5, 29, 21}, // CRLReason OID + Value: []byte{byte(rc.Reason)}, + }, + }, + } + crl.RevokedCertificates = append(crl.RevokedCertificates, revokedCert) + } + + // Create the CRL + crlBytes, err := x509.CreateRevocationList(rand.Reader, crl, cert, signer) + if err != nil { + return nil, fmt.Errorf("failed to create CRL: %w", err) + } + + // Encode the CRL in PEM format + pemBlock := &pem.Block{ + Type: "X509 CRL", + Bytes: crlBytes, + } + + return pem.EncodeToMemory(pemBlock), nil +} + +// GetRevokedCertificates returns all revoked certificates for an organization +func (s *OrganizationService) GetRevokedCertificates(ctx context.Context, orgID int64) ([]RevokedCertificateDTO, error) { + // Get organization details to verify it exists + _, err := s.queries.GetFabricOrganizationWithKeys(ctx, orgID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("organization not found") + } + return nil, fmt.Errorf("failed to get organization: %w", err) + } + + // Get all revoked certificates for this organization + revokedCerts, err := s.queries.GetRevokedCertificates(ctx, orgID) + if err != nil { + return nil, fmt.Errorf("failed to get revoked certificates: %w", err) + } + + // Convert to DTOs + dtos := make([]RevokedCertificateDTO, len(revokedCerts)) + for i, cert := range revokedCerts { + dtos[i] = RevokedCertificateDTO{ + SerialNumber: cert.SerialNumber, + RevocationTime: 
cert.RevocationTime, + Reason: cert.Reason, + } + } + + return dtos, nil +} + +// RevokeCertificate adds a certificate to the organization's CRL +func (s *OrganizationService) RevokeCertificate(ctx context.Context, orgID int64, serialNumber *big.Int, reason int) error { + // Get organization details + org, err := s.queries.GetFabricOrganizationWithKeys(ctx, orgID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("organization not found") + } + return fmt.Errorf("failed to get organization: %w", err) + } + + if !org.SignKeyID.Valid { + return fmt.Errorf("organization has no admin sign key") + } + + // Check if certificate is already revoked + revokedCerts, err := s.queries.GetRevokedCertificates(ctx, orgID) + if err != nil { + return fmt.Errorf("failed to check revoked certificates: %w", err) + } + + serialNumberHex := serialNumber.Text(16) + for _, cert := range revokedCerts { + if cert.SerialNumber == serialNumberHex { + return fmt.Errorf("certificate with serial number %s is already revoked", serialNumberHex) + } + } + + // Check if CRL is initialized (has last update time) + if !org.CrlLastUpdate.Valid { + // Initialize CRL timestamps + now := time.Now() + err = s.queries.UpdateOrganizationCRL(ctx, &db.UpdateOrganizationCRLParams{ + ID: orgID, + CrlLastUpdate: sql.NullTime{Time: now, Valid: true}, + CrlKeyID: org.SignKeyID, + }) + if err != nil { + return fmt.Errorf("failed to initialize CRL: %w", err) + } + } + + // Add the certificate to the database + err = s.queries.AddRevokedCertificate(ctx, &db.AddRevokedCertificateParams{ + FabricOrganizationID: orgID, + SerialNumber: serialNumberHex, // Store as hex string + RevocationTime: time.Now(), + Reason: int64(reason), + IssuerCertificateID: sql.NullInt64{ + Int64: org.SignKeyID.Int64, + Valid: true, + }, + }) + if err != nil { + return fmt.Errorf("failed to add revoked certificate: %w", err) + } + + // Update the CRL timestamps + now := time.Now() + err = 
s.queries.UpdateOrganizationCRL(ctx, &db.UpdateOrganizationCRLParams{ + ID: orgID, + CrlLastUpdate: sql.NullTime{Time: now, Valid: true}, + CrlKeyID: org.AdminSignKeyID, + }) + if err != nil { + return fmt.Errorf("failed to update CRL timestamps: %w", err) + } + + return nil +} + +// DeleteRevokedCertificate removes a certificate from the organization's revocation list +func (s *OrganizationService) DeleteRevokedCertificate(ctx context.Context, orgID int64, serialNumber string) error { + err := s.queries.DeleteRevokedCertificate(ctx, &db.DeleteRevokedCertificateParams{ + FabricOrganizationID: orgID, + SerialNumber: serialNumber, + }) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/keymanagement/handler/handler.go b/pkg/keymanagement/handler/handler.go index ec8ae6c..e4f8d53 100644 --- a/pkg/keymanagement/handler/handler.go +++ b/pkg/keymanagement/handler/handler.go @@ -24,7 +24,7 @@ func NewKeyManagementHandler(service *service.KeyManagementService) *KeyManageme // @Summary Get all keys // @Description Get all keys with their certificates and metadata -// @Tags keys +// @Tags Keys // @Accept json // @Produce json // @Success 200 {array} models.KeyResponse @@ -44,7 +44,7 @@ func (h *KeyManagementHandler) GetAllKeys(w http.ResponseWriter, r *http.Request // @Summary Create a new key // @Description Create a new key pair with specified algorithm and parameters -// @Tags keys +// @Tags Keys // @Accept json // @Produce json // @Param request body models.CreateKeyRequest true "Key creation request" @@ -99,7 +99,7 @@ func (h *KeyManagementHandler) RegisterRoutes(r chi.Router) { // @Summary Get paginated keys // @Description Get a paginated list of keys -// @Tags keys +// @Tags Keys // @Accept json // @Produce json // @Param page query int false "Page number" default(1) @@ -139,7 +139,7 @@ func (h *KeyManagementHandler) GetKeys(w http.ResponseWriter, r *http.Request) { // @Summary Get a specific key by ID // @Description Get detailed information about 
a specific key -// @Tags keys +// @Tags Keys // @Accept json // @Produce json // @Param id path int true "Key ID" @@ -169,7 +169,7 @@ func (h *KeyManagementHandler) GetKey(w http.ResponseWriter, r *http.Request) { // @Summary Delete a key // @Description Delete a specific key by ID -// @Tags keys +// @Tags Keys // @Accept json // @Produce json // @Param id path int true "Key ID" @@ -198,7 +198,7 @@ func (h *KeyManagementHandler) DeleteKey(w http.ResponseWriter, r *http.Request) // @Summary Create a new key provider // @Description Create a new provider for key management -// @Tags providers +// @Tags Providers // @Accept json // @Produce json // @Param request body models.CreateProviderRequest true "Provider creation request" @@ -228,7 +228,7 @@ func (h *KeyManagementHandler) CreateProvider(w http.ResponseWriter, r *http.Req // @Summary List all key providers // @Description Get a list of all configured key providers -// @Tags providers +// @Tags Providers // @Accept json // @Produce json // @Success 200 {array} models.ProviderResponse @@ -248,7 +248,7 @@ func (h *KeyManagementHandler) ListProviders(w http.ResponseWriter, r *http.Requ // @Summary Get a specific provider // @Description Get detailed information about a specific key provider -// @Tags providers +// @Tags Providers // @Accept json // @Produce json // @Param id path int true "Provider ID" @@ -278,7 +278,7 @@ func (h *KeyManagementHandler) GetProvider(w http.ResponseWriter, r *http.Reques // @Summary Delete a provider // @Description Delete a specific key provider -// @Tags providers +// @Tags Providers // @Accept json // @Produce json // @Param id path int true "Provider ID" @@ -308,7 +308,7 @@ func (h *KeyManagementHandler) DeleteProvider(w http.ResponseWriter, r *http.Req // @Summary Sign a certificate // @Description Sign a certificate for a key using a CA key -// @Tags keys +// @Tags Keys // @Accept json // @Produce json // @Param keyID path int true "Key ID to sign" @@ -352,7 +352,7 @@ func (h 
*KeyManagementHandler) SignCertificate(w http.ResponseWriter, r *http.Re // @Summary Filter keys by algorithm and curve // @Description Get keys filtered by algorithm type and/or curve type -// @Tags keys +// @Tags Keys // @Accept json // @Produce json // @Param algorithm query string false "Algorithm type (e.g., RSA, ECDSA)" diff --git a/pkg/keymanagement/models/models.go b/pkg/keymanagement/models/models.go index e2bb458..b2ef265 100644 --- a/pkg/keymanagement/models/models.go +++ b/pkg/keymanagement/models/models.go @@ -122,6 +122,7 @@ type KeyResponse struct { Provider KeyProviderInfo `json:"provider"` PrivateKey string `json:"privateKey"` EthereumAddress string `json:"ethereumAddress"` + SigningKeyID *int `json:"signingKeyID,omitempty"` } type KeyProviderInfo struct { diff --git a/pkg/keymanagement/providers/database/provider.go b/pkg/keymanagement/providers/database/provider.go index c4a11f2..326a91c 100644 --- a/pkg/keymanagement/providers/database/provider.go +++ b/pkg/keymanagement/providers/database/provider.go @@ -22,6 +22,7 @@ import ( "io" "math/big" "os" + "strings" "time" "github.com/chainlaunch/chainlaunch/pkg/db" @@ -172,7 +173,7 @@ func (p *DatabaseProvider) GenerateKey(ctx context.Context, req types.GenerateKe } // Create key in database - params := db.CreateKeyParams{ + params := &db.CreateKeyParams{ Name: req.Name, Algorithm: string(req.Algorithm), PublicKey: keyPair.PublicKey, @@ -258,7 +259,7 @@ func (p *DatabaseProvider) StoreKey(ctx context.Context, req types.StoreKeyReque } // Store in database - key, err := p.queries.CreateKey(ctx, db.CreateKeyParams{ + key, err := p.queries.CreateKey(ctx, &db.CreateKeyParams{ Name: req.Name, Description: sql.NullString{String: *req.Description, Valid: req.Description != nil}, Algorithm: string(req.Algorithm), @@ -327,7 +328,7 @@ func (p *DatabaseProvider) DeleteKey(ctx context.Context, id int) error { } // Helper function to map database key to response -func mapDBKeyToResponse(key db.Key) 
*models.KeyResponse { +func mapDBKeyToResponse(key *db.Key) *models.KeyResponse { response := &models.KeyResponse{ ID: int(key.ID), Name: key.Name, @@ -508,7 +509,7 @@ func (s *DatabaseProvider) generateECKeyPair(req models.CreateKeyRequest) (*KeyP // Generate Ethereum address address := crypto.PubkeyToAddress(*publicKeyECDSA) - ethereumAddress = address.Hex() + ethereumAddress = strings.ToLower(address.Hex()) } return &KeyPair{ @@ -550,7 +551,7 @@ func (s *DatabaseProvider) generateECKeyPair(req models.CreateKeyRequest) (*KeyP // Generate Ethereum address address := crypto.PubkeyToAddress(*publicKeyECDSA) - ethereumAddress = address.Hex() + ethereumAddress = strings.ToLower(address.Hex()) } return &KeyPair{ @@ -772,7 +773,7 @@ func (p *DatabaseProvider) SignCertificate(ctx context.Context, req types.SignCe }) // Update key with new certificate and other fields - params := db.UpdateKeyParams{ + params := &db.UpdateKeyParams{ ID: int64(req.KeyID), Name: key.Name, Description: key.Description, @@ -789,6 +790,7 @@ func (p *DatabaseProvider) SignCertificate(ctx context.Context, req types.SignCe Sha1Fingerprint: key.Sha1Fingerprint, ProviderID: key.ProviderID, UserID: key.UserID, + SigningKeyID: sql.NullInt64{Int64: int64(req.CAKeyID), Valid: true}, } updatedKey, err := p.queries.UpdateKey(ctx, params) diff --git a/pkg/keymanagement/service/service.go b/pkg/keymanagement/service/service.go index 63a7902..c8df962 100644 --- a/pkg/keymanagement/service/service.go +++ b/pkg/keymanagement/service/service.go @@ -51,7 +51,7 @@ func (s *KeyManagementService) InitializeKeyProviders(ctx context.Context) error return err } // Create default provider - _, err = s.queries.CreateKeyProvider(ctx, db.CreateKeyProviderParams{ + _, err = s.queries.CreateKeyProvider(ctx, &db.CreateKeyProviderParams{ Name: "Default Database Provider", Type: "DATABASE", IsDefault: 1, @@ -140,7 +140,7 @@ func (s *KeyManagementService) GetKeys(ctx context.Context, page, pageSize int) offset := (page - 1) * 
pageSize // Get keys with pagination - keys, err := s.queries.ListKeys(ctx, db.ListKeysParams{ + keys, err := s.queries.ListKeys(ctx, &db.ListKeysParams{ Limit: int64(pageSize), Offset: int64(offset), }) @@ -173,7 +173,8 @@ func (s *KeyManagementService) GetKeys(ctx context.Context, page, pageSize int) ID: int(key.ProviderID), Name: key.ProviderName, }, - PrivateKey: key.PrivateKey, + PrivateKey: key.PrivateKey, + EthereumAddress: key.EthereumAddress.String, } } @@ -195,6 +196,7 @@ func (s *KeyManagementService) GetKey(ctx context.Context, id int) (*models.KeyR } keySize := int(key.KeySize.Int64) curve := models.ECCurve(key.Curve.String) + signingKeyID := int(key.SigningKeyID.Int64) return &models.KeyResponse{ ID: int(key.ID), Name: key.Name, @@ -217,6 +219,7 @@ func (s *KeyManagementService) GetKey(ctx context.Context, id int) (*models.KeyR }, PrivateKey: key.PrivateKey, EthereumAddress: key.EthereumAddress.String, + SigningKeyID: &signingKeyID, }, err } @@ -250,7 +253,7 @@ func (s *KeyManagementService) CreateProvider(ctx context.Context, req models.Cr return nil, err } - provider, err := s.queries.CreateKeyProvider(ctx, db.CreateKeyProviderParams{ + provider, err := s.queries.CreateKeyProvider(ctx, &db.CreateKeyProviderParams{ Name: req.Name, Type: string(req.Type), IsDefault: int64(req.IsDefault), @@ -317,7 +320,7 @@ func (s *KeyManagementService) DeleteProvider(ctx context.Context, id int) error return nil } -func mapProviderToResponse(provider db.KeyProvider) *models.ProviderResponse { +func mapProviderToResponse(provider *db.KeyProvider) *models.ProviderResponse { return &models.ProviderResponse{ ID: int(provider.ID), Name: provider.Name, @@ -548,20 +551,31 @@ func (s *KeyManagementService) GetDecryptedPrivateKey(id int) (string, error) { // FilterKeys returns keys filtered by algorithm and/or curve func (s *KeyManagementService) FilterKeys(ctx context.Context, algorithm, curve string, page, pageSize int) (*models.PaginatedResponse, error) { - var keys 
[]db.Key + var keys []*db.GetKeysByFilterRow var err error if curve != "" { // If curve is specified, use GetKeysByProviderAndCurve // TODO: Get provider ID from context or configuration providerID := int64(1) - keys, err = s.queries.GetKeysByProviderAndCurve(ctx, db.GetKeysByProviderAndCurveParams{ - ProviderID: providerID, - Curve: sql.NullString{String: curve, Valid: true}, + keys, err = s.queries.GetKeysByFilter(ctx, &db.GetKeysByFilterParams{ + AlgorithmFilter: algorithm, + Algorithm: algorithm, + ProviderIDFilter: 0, + ProviderID: providerID, + CurveFilter: curve, + Curve: sql.NullString{String: curve, Valid: true}, }) } else if algorithm != "" { // If only algorithm is specified, use GetKeysByAlgorithm - keys, err = s.queries.GetKeysByAlgorithm(ctx, algorithm) + keys, err = s.queries.GetKeysByFilter(ctx, &db.GetKeysByFilterParams{ + AlgorithmFilter: algorithm, + Algorithm: algorithm, + ProviderIDFilter: 0, + ProviderID: 0, + CurveFilter: "", + Curve: sql.NullString{String: "", Valid: false}, + }) } else { // If no filters, get all keys return nil, fmt.Errorf("no filters provided") @@ -585,13 +599,30 @@ func (s *KeyManagementService) FilterKeys(ctx context.Context, algorithm, curve // Convert db.Key to models.KeyResponse keyResponses := make([]models.KeyResponse, 0) for _, key := range keys[start:end] { + keySize := int(key.KeySize.Int64) curve := models.ECCurve(key.Curve.String) keyResponses = append(keyResponses, models.KeyResponse{ - ID: int(key.ID), - Name: key.Name, - Algorithm: models.KeyAlgorithm(key.Algorithm), - Curve: &curve, - CreatedAt: key.CreatedAt, + ID: int(key.ID), + Name: key.Name, + Description: &key.Description.String, + Algorithm: models.KeyAlgorithm(key.Algorithm), + KeySize: &keySize, + Curve: &curve, + Format: key.Format, + PublicKey: key.PublicKey, + Certificate: &key.Certificate.String, + Status: key.Status, + CreatedAt: key.CreatedAt, + ExpiresAt: &key.ExpiresAt.Time, + LastRotatedAt: &key.LastRotatedAt.Time, + SHA256Fingerprint: 
key.Sha256Fingerprint, + SHA1Fingerprint: key.Sha1Fingerprint, + Provider: models.KeyProviderInfo{ + ID: int(key.ProviderID), + Name: key.ProviderName, + }, + PrivateKey: key.PrivateKey, + EthereumAddress: key.EthereumAddress.String, }) } @@ -608,3 +639,154 @@ type KeyInfo struct { KeyType string PublicKey string } + +// SetSigningKeyIDForKey updates a key with the ID of the key that signed its certificate +func (s *KeyManagementService) SetSigningKeyIDForKey(ctx context.Context, keyID, signingKeyID int) error { + // Validate that both keys exist + key, err := s.queries.GetKey(ctx, int64(keyID)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("key not found") + } + return fmt.Errorf("failed to get key: %w", err) + } + + signingKey, err := s.queries.GetKey(ctx, int64(signingKeyID)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("signing key not found") + } + return fmt.Errorf("failed to get signing key: %w", err) + } + + // Verify that the signing key is a CA + if signingKey.IsCa != 1 { + return fmt.Errorf("signing key %d is not a CA", signingKeyID) + } + + // Verify that the key has a certificate + if !key.Certificate.Valid { + return fmt.Errorf("key %d does not have a certificate", keyID) + } + + // Update the key with the signing key ID + params := &db.UpdateKeyParams{ + ID: key.ID, + Name: key.Name, + Description: key.Description, + Algorithm: key.Algorithm, + KeySize: key.KeySize, + Curve: key.Curve, + Format: key.Format, + PublicKey: key.PublicKey, + PrivateKey: key.PrivateKey, + Certificate: key.Certificate, + Status: key.Status, + ExpiresAt: key.ExpiresAt, + Sha256Fingerprint: key.Sha256Fingerprint, + Sha1Fingerprint: key.Sha1Fingerprint, + ProviderID: key.ProviderID, + UserID: key.UserID, + EthereumAddress: key.EthereumAddress, + SigningKeyID: sql.NullInt64{Int64: int64(signingKeyID), Valid: true}, + } + + _, err = s.queries.UpdateKey(ctx, params) + if err != nil { + return fmt.Errorf("failed to update 
key with signing key ID: %w", err) + } + + return nil +} + +// RenewCertificate renews a certificate using the same keypair and CA that was used to generate it +func (s *KeyManagementService) RenewCertificate(ctx context.Context, keyID int, certReq models.CertificateRequest) (*models.KeyResponse, error) { + // Get the key details + key, err := s.queries.GetKey(ctx, int64(keyID)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("key not found") + } + return nil, fmt.Errorf("failed to get key: %w", err) + } + + // Check if the key has a certificate + if !key.Certificate.Valid { + return nil, fmt.Errorf("key does not have a certificate to renew") + } + + // Get the CA key ID that was used to sign this certificate + if !key.SigningKeyID.Valid { + return nil, fmt.Errorf("key does not have an associated CA key") + } + caKeyID := int(key.SigningKeyID.Int64) + + // Validate that the CA key exists and is a CA + caKey, err := s.queries.GetKey(ctx, int64(caKeyID)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("CA key not found") + } + return nil, fmt.Errorf("failed to get CA key: %w", err) + } + + // Check if the CA key is marked as CA + if caKey.IsCa != 1 { + return nil, fmt.Errorf("key %d is not a CA", caKeyID) + } + + // Get provider + provider, err := s.providerFactory.GetProvider(providers.ProviderTypeDatabase) + if err != nil { + return nil, fmt.Errorf("failed to get provider: %w", err) + } + + // If no certificate request is provided, use the existing certificate's details + if certReq.CommonName == "" { + existingCert, err := parseCertificate(key.Certificate.String) + if err != nil { + return nil, fmt.Errorf("failed to parse existing certificate: %w", err) + } + + certReq = models.CertificateRequest{ + CommonName: existingCert.Subject.CommonName, + Organization: existingCert.Subject.Organization, + OrganizationalUnit: existingCert.Subject.OrganizationalUnit, + Country: existingCert.Subject.Country, + 
Province: existingCert.Subject.Province, + Locality: existingCert.Subject.Locality, + StreetAddress: existingCert.Subject.StreetAddress, + PostalCode: existingCert.Subject.PostalCode, + DNSNames: existingCert.DNSNames, + EmailAddresses: existingCert.EmailAddresses, + IPAddresses: existingCert.IPAddresses, + URIs: existingCert.URIs, + ValidFor: models.Duration(365 * 24 * time.Hour), + IsCA: existingCert.IsCA, + KeyUsage: x509.KeyUsage(existingCert.KeyUsage), + ExtKeyUsage: existingCert.ExtKeyUsage, + } + } + + // Sign the certificate with the same CA + return provider.SignCertificate(ctx, types.SignCertificateRequest{ + KeyID: keyID, + CAKeyID: caKeyID, + CertificateRequest: *ToProviderCertRequest(&certReq), + }) +} + +// Helper function to parse PEM certificate +func parseCertificate(certPEM string) (*x509.Certificate, error) { + block, _ := pem.Decode([]byte(certPEM)) + if block == nil { + return nil, fmt.Errorf("failed to decode PEM block containing certificate") + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %w", err) + } + + return cert, nil +} diff --git a/pkg/log/handler.go b/pkg/log/handler.go new file mode 100644 index 0000000..1530f0e --- /dev/null +++ b/pkg/log/handler.go @@ -0,0 +1,293 @@ +package log + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + "github.com/chainlaunch/chainlaunch/pkg/nodes/service" +) + +// LogHandler handles HTTP requests for log operations +type LogHandler struct { + logService *LogService + nodeService *service.NodeService +} + +// LogResponse represents the standard response format for log endpoints +type LogResponse struct { + Success bool `json:"success"` + Data interface{} `json:"data,omitempty"` + Error string `json:"error,omitempty"` +} + +// NewLogHandler creates a new instance of LogHandler +func NewLogHandler(logService *LogService, nodeService *service.NodeService) *LogHandler { + return &LogHandler{ + logService: 
logService, + nodeService: nodeService, + } +} + +// RegisterRoutes registers the log handler routes with the provided router +func (h *LogHandler) RegisterRoutes(router *http.ServeMux) { + router.HandleFunc("/nodes/{nodeID}/logs", h.GetNodeLogs) + router.HandleFunc("/nodes/{nodeID}/logs/range", h.GetLogRange) + router.HandleFunc("/nodes/{nodeID}/logs/filter", h.FilterLogs) + router.HandleFunc("/nodes/{nodeID}/logs/tail", h.TailLogs) + router.HandleFunc("/nodes/{nodeID}/logs/stats", h.GetLogStats) +} + +// GetNodeLogs handles requests to get all logs for a node +func (h *LogHandler) GetNodeLogs(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + h.sendError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + nodeID, err := h.getNodeIDFromPath(r.URL.Path) + if err != nil { + h.sendError(w, http.StatusBadRequest, "invalid node ID") + return + } + + // Get node from node service + node, err := h.nodeService.GetNode(r.Context(), nodeID) + if err != nil { + h.sendError(w, http.StatusNotFound, "node not found") + return + } + + // Get log file path based on node type and configuration + logPath, err := h.nodeService.GetNodeLogPath(r.Context(), node) + if err != nil { + h.sendError(w, http.StatusNotFound, "log file not found") + return + } + + // Stream logs to response + w.Header().Set("Content-Type", "text/plain") + err = h.logService.StreamLog(logPath, FilterOptions{}, w) + if err != nil { + h.sendError(w, http.StatusInternalServerError, "failed to stream logs") + return + } +} + +// GetLogRange handles requests to get a specific range of log lines +func (h *LogHandler) GetLogRange(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + h.sendError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + nodeID, err := h.getNodeIDFromPath(r.URL.Path) + if err != nil { + h.sendError(w, http.StatusBadRequest, "invalid node ID") + return + } + + startLine, err := 
strconv.Atoi(r.URL.Query().Get("start")) + if err != nil { + h.sendError(w, http.StatusBadRequest, "invalid start line") + return + } + + endLine, err := strconv.Atoi(r.URL.Query().Get("end")) + if err != nil { + h.sendError(w, http.StatusBadRequest, "invalid end line") + return + } + + // Get node and log path + node, err := h.nodeService.GetNode(r.Context(), nodeID) + if err != nil { + h.sendError(w, http.StatusNotFound, "node not found") + return + } + + logPath, err := h.nodeService.GetNodeLogPath(r.Context(), node) + if err != nil { + h.sendError(w, http.StatusNotFound, "log file not found") + return + } + + // Get log range + logRange, err := h.logService.ReadLogRange(logPath, startLine, endLine) + if err != nil { + h.sendError(w, http.StatusInternalServerError, "failed to read log range") + return + } + + h.sendJSON(w, http.StatusOK, logRange) +} + +// FilterLogs handles requests to filter logs based on pattern and range +func (h *LogHandler) FilterLogs(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + h.sendError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + nodeID, err := h.getNodeIDFromPath(r.URL.Path) + if err != nil { + h.sendError(w, http.StatusBadRequest, "invalid node ID") + return + } + + // Parse filter options from query parameters + options := FilterOptions{ + Pattern: r.URL.Query().Get("pattern"), + IgnoreCase: r.URL.Query().Get("ignoreCase") == "true", + } + + if startStr := r.URL.Query().Get("start"); startStr != "" { + start, err := strconv.Atoi(startStr) + if err != nil { + h.sendError(w, http.StatusBadRequest, "invalid start line") + return + } + options.StartLine = start + } + + if endStr := r.URL.Query().Get("end"); endStr != "" { + end, err := strconv.Atoi(endStr) + if err != nil { + h.sendError(w, http.StatusBadRequest, "invalid end line") + return + } + options.EndLine = end + } + + // Get node and log path + node, err := h.nodeService.GetNode(r.Context(), nodeID) + if err != nil { + 
h.sendError(w, http.StatusNotFound, "node not found") + return + } + + logPath, err := h.nodeService.GetNodeLogPath(r.Context(), node) + if err != nil { + h.sendError(w, http.StatusNotFound, "log file not found") + return + } + + // Filter logs + entries, err := h.logService.FilterLog(logPath, options) + if err != nil { + h.sendError(w, http.StatusInternalServerError, "failed to filter logs") + return + } + + h.sendJSON(w, http.StatusOK, entries) +} + +// TailLogs handles requests to get the last n lines of logs +func (h *LogHandler) TailLogs(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + h.sendError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + nodeID, err := h.getNodeIDFromPath(r.URL.Path) + if err != nil { + h.sendError(w, http.StatusBadRequest, "invalid node ID") + return + } + + lines, err := strconv.Atoi(r.URL.Query().Get("lines")) + if err != nil || lines < 1 || lines > maxTailLines { + h.sendError(w, http.StatusBadRequest, fmt.Sprintf("invalid number of lines (must be between 1 and %d)", maxTailLines)) + return + } + + // Get node and log path + node, err := h.nodeService.GetNode(r.Context(), nodeID) + if err != nil { + h.sendError(w, http.StatusNotFound, "node not found") + return + } + + logPath, err := h.nodeService.GetNodeLogPath(r.Context(), node) + if err != nil { + h.sendError(w, http.StatusNotFound, "log file not found") + return + } + + // Get tail of log + entries, err := h.logService.TailLog(logPath, lines) + if err != nil { + h.sendError(w, http.StatusInternalServerError, "failed to tail logs") + return + } + + h.sendJSON(w, http.StatusOK, entries) +} + +// GetLogStats handles requests to get statistics about a log file +func (h *LogHandler) GetLogStats(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + h.sendError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + nodeID, err := h.getNodeIDFromPath(r.URL.Path) + if err != nil { + 
h.sendError(w, http.StatusBadRequest, "invalid node ID") + return + } + + // Get node and log path + node, err := h.nodeService.GetNode(r.Context(), nodeID) + if err != nil { + h.sendError(w, http.StatusNotFound, "node not found") + return + } + + logPath, err := h.nodeService.GetNodeLogPath(r.Context(), node) + if err != nil { + h.sendError(w, http.StatusNotFound, "log file not found") + return + } + + // Get log stats + stats, err := h.logService.GetLogStats(logPath) + if err != nil { + h.sendError(w, http.StatusInternalServerError, "failed to get log stats") + return + } + + h.sendJSON(w, http.StatusOK, stats) +} + +// getNodeIDFromPath extracts the node ID from the URL path +func (h *LogHandler) getNodeIDFromPath(path string) (int64, error) { + // Extract nodeID from path like "/nodes/{nodeID}/logs" + var nodeID int64 + _, err := fmt.Sscanf(path, "/nodes/%d/logs", &nodeID) + if err != nil { + return 0, err + } + return nodeID, nil +} + +// sendJSON sends a JSON response +func (h *LogHandler) sendJSON(w http.ResponseWriter, status int, data interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + json.NewEncoder(w).Encode(LogResponse{ + Success: true, + Data: data, + }) +} + +// sendError sends an error response +func (h *LogHandler) sendError(w http.ResponseWriter, status int, message string) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + json.NewEncoder(w).Encode(LogResponse{ + Success: false, + Error: message, + }) +} diff --git a/pkg/log/index.go b/pkg/log/index.go new file mode 100644 index 0000000..c5da926 --- /dev/null +++ b/pkg/log/index.go @@ -0,0 +1,161 @@ +package log + +import ( + "bufio" + "encoding/binary" + "fmt" + "os" + "sync" +) + +// LineIndex stores the byte offsets of lines in a log file +type LineIndex struct { + offsets []int64 // Byte offsets for each line + indexPath string // Path to the index file + sourceSize int64 // Size of the source file when indexed + mutex 
sync.RWMutex +} + +// newLineIndex creates a new line index for a log file +func newLineIndex(logPath string) (*LineIndex, error) { + indexPath := logPath + ".idx" + index := &LineIndex{ + indexPath: indexPath, + } + + // Try to load existing index + if err := index.load(); err == nil { + // Verify if the index is still valid + if info, err := os.Stat(logPath); err == nil { + if info.Size() == index.sourceSize { + return index, nil + } + } + } + + // Create new index if loading failed or index is invalid + if err := index.build(logPath); err != nil { + return nil, err + } + + return index, nil +} + +// build creates a new index for the log file +func (idx *LineIndex) build(logPath string) error { + idx.mutex.Lock() + defer idx.mutex.Unlock() + + file, err := os.Open(logPath) + if err != nil { + return fmt.Errorf("failed to open log file: %w", err) + } + defer file.Close() + + // Get file size + info, err := file.Stat() + if err != nil { + return fmt.Errorf("failed to get file info: %w", err) + } + idx.sourceSize = info.Size() + + // Create a buffered reader + reader := bufio.NewReader(file) + var offset int64 + idx.offsets = make([]int64, 0, 1000) // Pre-allocate space for 1000 lines + + // Record the offset of the first line + idx.offsets = append(idx.offsets, 0) + + for { + line, err := reader.ReadBytes('\n') + if err != nil { + break // End of file or error + } + offset += int64(len(line)) + idx.offsets = append(idx.offsets, offset) + } + + // Save the index to disk + return idx.save() +} + +// save writes the index to disk +func (idx *LineIndex) save() error { + file, err := os.Create(idx.indexPath) + if err != nil { + return fmt.Errorf("failed to create index file: %w", err) + } + defer file.Close() + + // Write source file size + if err := binary.Write(file, binary.LittleEndian, idx.sourceSize); err != nil { + return fmt.Errorf("failed to write source size: %w", err) + } + + // Write number of offsets + numOffsets := int64(len(idx.offsets)) + if err := 
binary.Write(file, binary.LittleEndian, numOffsets); err != nil { + return fmt.Errorf("failed to write offset count: %w", err) + } + + // Write offsets + for _, offset := range idx.offsets { + if err := binary.Write(file, binary.LittleEndian, offset); err != nil { + return fmt.Errorf("failed to write offset: %w", err) + } + } + + return nil +} + +// load reads the index from disk +func (idx *LineIndex) load() error { + idx.mutex.Lock() + defer idx.mutex.Unlock() + + file, err := os.Open(idx.indexPath) + if err != nil { + return fmt.Errorf("failed to open index file: %w", err) + } + defer file.Close() + + // Read source file size + if err := binary.Read(file, binary.LittleEndian, &idx.sourceSize); err != nil { + return fmt.Errorf("failed to read source size: %w", err) + } + + // Read number of offsets + var numOffsets int64 + if err := binary.Read(file, binary.LittleEndian, &numOffsets); err != nil { + return fmt.Errorf("failed to read offset count: %w", err) + } + + // Read offsets + idx.offsets = make([]int64, numOffsets) + for i := range idx.offsets { + if err := binary.Read(file, binary.LittleEndian, &idx.offsets[i]); err != nil { + return fmt.Errorf("failed to read offset: %w", err) + } + } + + return nil +} + +// getOffset returns the byte offset for a given line number (1-based) +func (idx *LineIndex) getOffset(lineNum int) (int64, error) { + idx.mutex.RLock() + defer idx.mutex.RUnlock() + + if lineNum < 1 || lineNum > len(idx.offsets) { + return 0, fmt.Errorf("line number out of range") + } + return idx.offsets[lineNum-1], nil +} + +// getLineCount returns the total number of lines in the indexed file +func (idx *LineIndex) getLineCount() int { + idx.mutex.RLock() + defer idx.mutex.RUnlock() + return len(idx.offsets) +} diff --git a/pkg/log/service.go b/pkg/log/service.go new file mode 100644 index 0000000..6875d40 --- /dev/null +++ b/pkg/log/service.go @@ -0,0 +1,494 @@ +package log + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "sync" + "syscall" 
+) + +const ( + // Size threshold for using memory mapping (100MB) + memoryMapThreshold = 100 * 1024 * 1024 + // Size of chunks for reading large files (1MB) + chunkSize = 1024 * 1024 + // Maximum number of lines that can be tailed + maxTailLines = 10000 +) + +// LogService handles operations related to log files +type LogService struct { + indexCache map[string]*LineIndex + cacheMutex sync.RWMutex +} + +// LogEntry represents a single log entry +type LogEntry struct { + LineNumber int `json:"line_number"` + Content string `json:"content"` +} + +// LogRange represents a range of log entries +type LogRange struct { + StartLine int `json:"start_line"` + EndLine int `json:"end_line"` + Entries []LogEntry `json:"entries"` +} + +// FilterOptions represents options for filtering log entries +type FilterOptions struct { + Pattern string // Regex pattern to match + IgnoreCase bool // Whether to ignore case in pattern matching + StartLine int // Start line number (1-based) + EndLine int // End line number (1-based) +} + +// NewLogService creates a new instance of LogService +func NewLogService() *LogService { + return &LogService{ + indexCache: make(map[string]*LineIndex), + } +} + +// getOrCreateIndex gets or creates a line index for a log file +func (s *LogService) getOrCreateIndex(filePath string) (*LineIndex, error) { + s.cacheMutex.RLock() + index, exists := s.indexCache[filePath] + s.cacheMutex.RUnlock() + + if exists { + return index, nil + } + + s.cacheMutex.Lock() + defer s.cacheMutex.Unlock() + + // Check again in case another goroutine created the index + if index, exists = s.indexCache[filePath]; exists { + return index, nil + } + + index, err := newLineIndex(filePath) + if err != nil { + return nil, err + } + + s.indexCache[filePath] = index + return index, nil +} + +// ReadLogRange reads a specific range of lines from a log file +func (s *LogService) ReadLogRange(filePath string, startLine, endLine int) (*LogRange, error) { + if startLine < 1 { + return nil, 
fmt.Errorf("start line must be >= 1") + } + if endLine < startLine { + return nil, fmt.Errorf("end line must be >= start line") + } + + // Get file info + info, err := os.Stat(filePath) + if err != nil { + return nil, fmt.Errorf("failed to get file info: %w", err) + } + + // For large files, use the index and memory mapping + if info.Size() > memoryMapThreshold { + return s.readLogRangeMMap(filePath, startLine, endLine) + } + + // For smaller files, use the original implementation + file, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("failed to open log file: %w", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + currentLine := 0 + entries := []LogEntry{} + + for scanner.Scan() { + currentLine++ + if currentLine < startLine { + continue + } + if currentLine > endLine { + break + } + entries = append(entries, LogEntry{ + LineNumber: currentLine, + Content: scanner.Text(), + }) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading log file: %w", err) + } + + return &LogRange{ + StartLine: startLine, + EndLine: endLine, + Entries: entries, + }, nil +} + +// readLogRangeMMap reads a range of lines using memory mapping for large files +func (s *LogService) readLogRangeMMap(filePath string, startLine, endLine int) (*LogRange, error) { + // Get or create the line index + index, err := s.getOrCreateIndex(filePath) + if err != nil { + return nil, fmt.Errorf("failed to get line index: %w", err) + } + + // Get file offsets for the requested lines + startOffset, err := index.getOffset(startLine) + if err != nil { + return nil, err + } + + endOffset, err := index.getOffset(endLine + 1) + if err != nil { + // If endLine is the last line, use the file size as endOffset + if endLine == index.getLineCount() { + endOffset = index.sourceSize + } else { + return nil, err + } + } + + // Open the file + file, err := os.OpenFile(filePath, os.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("failed to open 
log file: %w", err) + } + defer file.Close() + + // Memory map the file + data, err := syscall.Mmap(int(file.Fd()), startOffset, int(endOffset-startOffset), + syscall.PROT_READ, syscall.MAP_PRIVATE) + if err != nil { + return nil, fmt.Errorf("failed to memory map file: %w", err) + } + defer syscall.Munmap(data) + + // Process the mapped data + entries := make([]LogEntry, 0, endLine-startLine+1) + lineNum := startLine + start := 0 + + for i := 0; i < len(data); i++ { + if data[i] == '\n' || i == len(data)-1 { + end := i + if i == len(data)-1 && data[i] != '\n' { + end = i + 1 + } + entries = append(entries, LogEntry{ + LineNumber: lineNum, + Content: string(data[start:end]), + }) + lineNum++ + start = i + 1 + } + } + + return &LogRange{ + StartLine: startLine, + EndLine: endLine, + Entries: entries, + }, nil +} + +// FilterLog filters log entries based on provided options +func (s *LogService) FilterLog(filePath string, options FilterOptions) ([]LogEntry, error) { + // Get file info + info, err := os.Stat(filePath) + if err != nil { + return nil, fmt.Errorf("failed to get file info: %w", err) + } + + // For large files, process in chunks + if info.Size() > memoryMapThreshold { + return s.filterLogChunked(filePath, options) + } + + // For smaller files, use the original implementation + file, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("failed to open log file: %w", err) + } + defer file.Close() + + var pattern *regexp.Regexp + if options.Pattern != "" { + flags := "" + if options.IgnoreCase { + flags = "(?i)" + } + pattern, err = regexp.Compile(flags + options.Pattern) + if err != nil { + return nil, fmt.Errorf("invalid regex pattern: %w", err) + } + } + + scanner := bufio.NewScanner(file) + currentLine := 0 + var entries []LogEntry + + for scanner.Scan() { + currentLine++ + + if options.StartLine > 0 && currentLine < options.StartLine { + continue + } + if options.EndLine > 0 && currentLine > options.EndLine { + break + } + + line := 
scanner.Text() + + if pattern != nil && !pattern.MatchString(line) { + continue + } + + entries = append(entries, LogEntry{ + LineNumber: currentLine, + Content: line, + }) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading log file: %w", err) + } + + return entries, nil +} + +// filterLogChunked processes a large log file in chunks +func (s *LogService) filterLogChunked(filePath string, options FilterOptions) ([]LogEntry, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("failed to open log file: %w", err) + } + defer file.Close() + + var pattern *regexp.Regexp + if options.Pattern != "" { + flags := "" + if options.IgnoreCase { + flags = "(?i)" + } + pattern, err = regexp.Compile(flags + options.Pattern) + if err != nil { + return nil, fmt.Errorf("invalid regex pattern: %w", err) + } + } + + var entries []LogEntry + buffer := make([]byte, chunkSize) + lineBuffer := []byte{} + currentLine := 0 + offset := int64(0) + + for { + n, err := file.ReadAt(buffer, offset) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("error reading file chunk: %w", err) + } + + chunk := buffer[:n] + start := 0 + + for i := 0; i < len(chunk); i++ { + if chunk[i] == '\n' || (err == io.EOF && i == len(chunk)-1) { + // Complete the line with the buffered content + line := append(lineBuffer, chunk[start:i]...) + lineBuffer = lineBuffer[:0] + currentLine++ + + if options.StartLine > 0 && currentLine < options.StartLine { + start = i + 1 + continue + } + if options.EndLine > 0 && currentLine > options.EndLine { + return entries, nil + } + + lineStr := string(line) + if pattern == nil || pattern.MatchString(lineStr) { + entries = append(entries, LogEntry{ + LineNumber: currentLine, + Content: lineStr, + }) + } + start = i + 1 + } + } + + // Buffer any incomplete line + if start < len(chunk) { + lineBuffer = append(lineBuffer, chunk[start:]...) 
+ } + + offset += int64(n) + if err == io.EOF { + break + } + } + + return entries, nil +} + +// TailLog returns the last n lines of a log file +func (s *LogService) TailLog(filePath string, n int) ([]LogEntry, error) { + if n < 1 { + return nil, fmt.Errorf("number of lines must be >= 1") + } + + // Add maximum limit check + if n > maxTailLines { + return nil, fmt.Errorf("requested number of lines exceeds maximum limit of %d", maxTailLines) + } + + file, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("failed to open log file: %w", err) + } + defer file.Close() + + // Create a ring buffer to store the last n lines + lines := make([]string, n) + lineNumbers := make([]int, n) + currentIndex := 0 + totalLines := 0 + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + totalLines++ + lines[currentIndex] = scanner.Text() + lineNumbers[currentIndex] = totalLines + currentIndex = (currentIndex + 1) % n + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading log file: %w", err) + } + + // Create result slice with correct order + var entries []LogEntry + if totalLines < n { + // File has fewer lines than requested + for i := 0; i < totalLines; i++ { + entries = append(entries, LogEntry{ + LineNumber: lineNumbers[i], + Content: lines[i], + }) + } + } else { + // File has more lines than requested + for i := 0; i < n; i++ { + idx := (currentIndex + i) % n + entries = append(entries, LogEntry{ + LineNumber: lineNumbers[idx], + Content: lines[idx], + }) + } + } + + return entries, nil +} + +// StreamLog streams log file content with optional filtering +func (s *LogService) StreamLog(filePath string, options FilterOptions, writer io.Writer) error { + file, err := os.Open(filePath) + if err != nil { + return fmt.Errorf("failed to open log file: %w", err) + } + defer file.Close() + + var pattern *regexp.Regexp + if options.Pattern != "" { + flags := "" + if options.IgnoreCase { + flags = "(?i)" + } + pattern, err = 
regexp.Compile(flags + options.Pattern) + if err != nil { + return fmt.Errorf("invalid regex pattern: %w", err) + } + } + + scanner := bufio.NewScanner(file) + currentLine := 0 + + for scanner.Scan() { + currentLine++ + + // Skip lines before StartLine if specified + if options.StartLine > 0 && currentLine < options.StartLine { + continue + } + + // Stop after EndLine if specified + if options.EndLine > 0 && currentLine > options.EndLine { + break + } + + line := scanner.Text() + + // Apply pattern filtering if pattern is specified + if pattern != nil { + if !pattern.MatchString(line) { + continue + } + } + + // Write the line to the writer + if _, err := fmt.Fprintln(writer, line); err != nil { + return fmt.Errorf("error writing to output: %w", err) + } + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading log file: %w", err) + } + + return nil +} + +// GetLogStats returns statistics about a log file +func (s *LogService) GetLogStats(filePath string) (map[string]interface{}, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("failed to open log file: %w", err) + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("failed to get file info: %w", err) + } + + stats := map[string]interface{}{ + "size_bytes": fileInfo.Size(), + "modified": fileInfo.ModTime(), + } + + // Count total lines + scanner := bufio.NewScanner(file) + lineCount := 0 + for scanner.Scan() { + lineCount++ + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading log file: %w", err) + } + + stats["total_lines"] = lineCount + + return stats, nil +} diff --git a/pkg/log/service_test.go b/pkg/log/service_test.go new file mode 100644 index 0000000..cee8062 --- /dev/null +++ b/pkg/log/service_test.go @@ -0,0 +1,232 @@ +package log + +import ( + "bytes" + "os" + "path/filepath" + "strings" + "testing" +) + +func createTestLogFile(t *testing.T) (string, func()) { + 
t.Helper() + + // Create a temporary log file + content := []string{ + "2024-03-20 10:00:00 INFO Starting application", + "2024-03-20 10:00:01 DEBUG Initializing database", + "2024-03-20 10:00:02 ERROR Failed to connect to database", + "2024-03-20 10:00:03 INFO Retrying database connection", + "2024-03-20 10:00:04 INFO Database connected successfully", + } + + tmpDir := t.TempDir() + logFile := filepath.Join(tmpDir, "test.log") + + err := os.WriteFile(logFile, []byte(strings.Join(content, "\n")), 0644) + if err != nil { + t.Fatalf("Failed to create test log file: %v", err) + } + + cleanup := func() { + os.Remove(logFile) + } + + return logFile, cleanup +} + +func TestReadLogRange(t *testing.T) { + logFile, cleanup := createTestLogFile(t) + defer cleanup() + + service := NewLogService() + + tests := []struct { + name string + startLine int + endLine int + want int // expected number of lines + wantErr bool + }{ + { + name: "valid range", + startLine: 2, + endLine: 4, + want: 3, + wantErr: false, + }, + { + name: "invalid start line", + startLine: 0, + endLine: 4, + want: 0, + wantErr: true, + }, + { + name: "invalid range", + startLine: 4, + endLine: 2, + want: 0, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := service.ReadLogRange(logFile, tt.startLine, tt.endLine) + if (err != nil) != tt.wantErr { + t.Errorf("ReadLogRange() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && len(got.Entries) != tt.want { + t.Errorf("ReadLogRange() got %d entries, want %d", len(got.Entries), tt.want) + } + }) + } +} + +func TestFilterLog(t *testing.T) { + logFile, cleanup := createTestLogFile(t) + defer cleanup() + + service := NewLogService() + + tests := []struct { + name string + options FilterOptions + want int // expected number of lines + wantErr bool + }{ + { + name: "filter by pattern", + options: FilterOptions{ + Pattern: "INFO", + IgnoreCase: false, + }, + want: 3, + wantErr: false, + }, + 
{ + name: "filter by line range", + options: FilterOptions{ + StartLine: 1, + EndLine: 3, + }, + want: 3, + wantErr: false, + }, + { + name: "filter by pattern and range", + options: FilterOptions{ + Pattern: "ERROR", + StartLine: 1, + EndLine: 4, + }, + want: 1, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := service.FilterLog(logFile, tt.options) + if (err != nil) != tt.wantErr { + t.Errorf("FilterLog() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && len(got) != tt.want { + t.Errorf("FilterLog() got %d entries, want %d", len(got), tt.want) + } + }) + } +} + +func TestTailLog(t *testing.T) { + logFile, cleanup := createTestLogFile(t) + defer cleanup() + + service := NewLogService() + + tests := []struct { + name string + n int + want int // expected number of lines + wantErr bool + }{ + { + name: "tail 3 lines", + n: 3, + want: 3, + wantErr: false, + }, + { + name: "tail all lines", + n: 10, + want: 5, // total lines in test file + wantErr: false, + }, + { + name: "invalid line count", + n: 0, + want: 0, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := service.TailLog(logFile, tt.n) + if (err != nil) != tt.wantErr { + t.Errorf("TailLog() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && len(got) != tt.want { + t.Errorf("TailLog() got %d entries, want %d", len(got), tt.want) + } + }) + } +} + +func TestStreamLog(t *testing.T) { + logFile, cleanup := createTestLogFile(t) + defer cleanup() + + service := NewLogService() + + var buf bytes.Buffer + err := service.StreamLog(logFile, FilterOptions{ + Pattern: "INFO", + }, &buf) + + if err != nil { + t.Errorf("StreamLog() error = %v", err) + return + } + + output := buf.String() + count := strings.Count(output, "INFO") + if count != 3 { + t.Errorf("StreamLog() got %d INFO lines, want 3", count) + } +} + +func TestGetLogStats(t *testing.T) { + logFile, 
cleanup := createTestLogFile(t) + defer cleanup() + + service := NewLogService() + + stats, err := service.GetLogStats(logFile) + if err != nil { + t.Errorf("GetLogStats() error = %v", err) + return + } + + if stats["total_lines"].(int) != 5 { + t.Errorf("GetLogStats() got %d total lines, want 5", stats["total_lines"]) + } + + if stats["size_bytes"].(int64) <= 0 { + t.Errorf("GetLogStats() got invalid file size: %v", stats["size_bytes"]) + } +} diff --git a/pkg/monitoring/service.go b/pkg/monitoring/service.go index 2f139d7..a3e8cb0 100644 --- a/pkg/monitoring/service.go +++ b/pkg/monitoring/service.go @@ -2,12 +2,19 @@ package monitoring import ( "context" + "crypto/tls" + "crypto/x509" "fmt" + "net" "net/http" + "net/url" "strings" "sync" "time" + "github.com/chainlaunch/chainlaunch/pkg/certutils" + "github.com/chainlaunch/chainlaunch/pkg/logger" + nodes "github.com/chainlaunch/chainlaunch/pkg/nodes/service" "github.com/chainlaunch/chainlaunch/pkg/notifications" ) @@ -32,6 +39,7 @@ type Service interface { // service implements the Service interface type service struct { + logger *logger.Logger config *Config nodes map[int64]*Node nodesMutex sync.RWMutex @@ -41,15 +49,17 @@ type service struct { workerWaitGroup sync.WaitGroup lastCheckResults map[int64]*NodeCheck resultsMutex sync.RWMutex + nodeService *nodes.NodeService } // NewService creates a new monitoring service -func NewService(config *Config, notificationSvc notifications.Service) Service { +func NewService(logger *logger.Logger, config *Config, notificationSvc notifications.Service, nodeService *nodes.NodeService) Service { if config == nil { config = DefaultConfig() } return &service{ + logger: logger, config: config, nodes: make(map[int64]*Node), notificationSvc: notificationSvc, @@ -58,6 +68,7 @@ func NewService(config *Config, notificationSvc notifications.Service) Service { httpClient: &http.Client{ Timeout: config.DefaultTimeout, }, + nodeService: nodeService, } } @@ -172,6 +183,7 @@ func (s 
*service) worker(ctx context.Context, workerID int) { // checkNodes performs the actual node checks func (s *service) checkNodes(ctx context.Context) { + now := time.Now() // Get a copy of the nodes to check @@ -194,19 +206,88 @@ func (s *service) checkNodes(ctx context.Context) { func (s *service) checkNode(ctx context.Context, node *Node) { start := time.Now() - // Create a context with the node's timeout - checkCtx, cancel := context.WithTimeout(ctx, node.Timeout) - defer cancel() + // Parse the URL to get host and port + u, err := url.Parse(node.URL) + if err != nil { + s.handleNodeCheckResult(node, NodeStatusDown, 0, err) + return + } - // Create a request to check the node - req, err := http.NewRequestWithContext(checkCtx, http.MethodGet, node.URL, nil) + // Extract host and port + host := u.Host + if !strings.Contains(host, ":") { + // Default to port 443 for HTTPS + host = host + ":443" + } + + // Try to establish TCP connection + dialer := &net.Dialer{ + Timeout: node.Timeout, + } + nodeResponse, err := s.nodeService.GetNode(ctx, node.ID) + if err != nil { + s.handleNodeCheckResult(node, NodeStatusDown, 0, err) + return + } + var x509Cert *x509.Certificate + if nodeResponse.FabricPeer != nil { + tlsCertStr := nodeResponse.FabricPeer.TLSCert + if tlsCertStr != "" { + x509Cert, err = certutils.ParseX509Certificate([]byte(tlsCertStr)) + if err != nil { + s.handleNodeCheckResult(node, NodeStatusDown, 0, err) + return + } + } + } else if nodeResponse.FabricOrderer != nil { + tlsCertStr := nodeResponse.FabricOrderer.TLSCert + if tlsCertStr != "" { + x509Cert, err = certutils.ParseX509Certificate([]byte(tlsCertStr)) + if err != nil { + s.handleNodeCheckResult(node, NodeStatusDown, 0, err) + return + } + } + } + var tlsCert tls.Certificate + if x509Cert != nil { + tlsCert = tls.Certificate{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: nil, + } + } + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + } + // Try TLS connection + conn, 
err := tls.DialWithDialer(dialer, "tcp", host, tlsConfig) if err != nil { s.handleNodeCheckResult(node, NodeStatusDown, 0, err) return } + defer conn.Close() + + // Create HTTP request + req, err := http.NewRequestWithContext(ctx, "GET", node.URL, nil) + if err != nil { + s.handleNodeCheckResult(node, NodeStatusDown, 0, err) + return + } + + // Create a client with the TLS connection + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + return conn, nil + }, + } + client := &http.Client{ + Transport: transport, + Timeout: node.Timeout, + } // Perform the request - resp, err := s.httpClient.Do(req) + resp, err := client.Do(req) responseTime := time.Since(start) if err != nil { @@ -290,30 +371,6 @@ func (s *service) handleNodeCheckResult(node *Node, status NodeStatus, responseT } } -// formatDuration formats a duration into a human-readable string -func formatDuration(d time.Duration) string { - days := int(d.Hours() / 24) - hours := int(d.Hours()) % 24 - minutes := int(d.Minutes()) % 60 - seconds := int(d.Seconds()) % 60 - - parts := []string{} - if days > 0 { - parts = append(parts, fmt.Sprintf("%dd", days)) - } - if hours > 0 { - parts = append(parts, fmt.Sprintf("%dh", hours)) - } - if minutes > 0 { - parts = append(parts, fmt.Sprintf("%dm", minutes)) - } - if seconds > 0 || len(parts) == 0 { - parts = append(parts, fmt.Sprintf("%ds", seconds)) - } - - return strings.Join(parts, " ") -} - // sendNodeDownNotification sends a notification that a node is down func (s *service) sendNodeDownNotification(ctx context.Context, node *Node, err error) { data := notifications.NodeDowntimeData{ @@ -328,7 +385,7 @@ func (s *service) sendNodeDownNotification(ctx context.Context, node *Node, err // Send the notification if err := s.notificationSvc.SendNodeDowntimeNotification(ctx, data); err != nil { // Just log the error; we don't want to create a notification loop - fmt.Printf("Failed 
to send node downtime notification: %v\n", err) + s.logger.Errorf("Failed to send node downtime notification: %v", err) } } @@ -347,6 +404,6 @@ func (s *service) sendNodeRecoveryNotification(ctx context.Context, node *Node, // Send the notification if err := s.notificationSvc.SendNodeRecoveryNotification(ctx, data); err != nil { // Just log the error; we don't want to create a notification loop - fmt.Printf("Failed to send node recovery notification: %v\n", err) + s.logger.Errorf("Failed to send node recovery notification: %v", err) } } diff --git a/pkg/networks/http/besu_types.go b/pkg/networks/http/besu_types.go index 86bb7c1..e9b7ab1 100644 --- a/pkg/networks/http/besu_types.go +++ b/pkg/networks/http/besu_types.go @@ -59,7 +59,7 @@ type BesuNetworkResponse struct { CreatedAt string `json:"createdAt"` UpdatedAt string `json:"updatedAt,omitempty"` Config json.RawMessage `json:"config,omitempty"` - GenesisConfig json.RawMessage `json:"genesisConfig,omitempty"` + GenesisConfig string `json:"genesisConfig,omitempty"` } // ListBesuNetworksResponse represents the response for listing Besu networks diff --git a/pkg/networks/http/handler.go b/pkg/networks/http/handler.go index 0915e73..1aaba62 100644 --- a/pkg/networks/http/handler.go +++ b/pkg/networks/http/handler.go @@ -13,6 +13,7 @@ import ( "encoding/base64" "github.com/chainlaunch/chainlaunch/pkg/networks/service" + "github.com/chainlaunch/chainlaunch/pkg/networks/service/fabric" "github.com/chainlaunch/chainlaunch/pkg/networks/service/types" nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service" nodetypes "github.com/chainlaunch/chainlaunch/pkg/nodes/types" @@ -57,6 +58,12 @@ func (h *Handler) RegisterRoutes(r chi.Router) { r.Get("/by-name/{name}", h.FabricNetworkGetByName) r.Post("/import", h.ImportFabricNetwork) r.Post("/import-with-org", h.ImportFabricNetworkWithOrg) + r.Post("/{id}/update-config", h.FabricUpdateChannelConfig) + r.Get("/{id}/blocks", h.FabricGetBlocks) + 
r.Get("/{id}/blocks/{blockNum}", h.FabricGetBlock) + r.Get("/{id}/info", h.GetChainInfo) + r.Get("/{id}/transactions/{txId}", h.FabricGetTransaction) + r.Post("/{id}/organization-crl", h.UpdateOrganizationCRL) }) // New Besu routes @@ -67,11 +74,12 @@ func (h *Handler) RegisterRoutes(r chi.Router) { r.Get("/{id}", h.BesuNetworkGet) r.Delete("/{id}", h.BesuNetworkDelete) }) + } // @Summary List Fabric networks // @Description Get a paginated list of Fabric networks -// @Tags fabric-networks +// @Tags Fabric Networks // @Produce json // @Param limit query int false "Number of items to return (default: 10)" // @Param offset query int false "Number of items to skip (default: 0)" @@ -127,7 +135,7 @@ func (h *Handler) FabricNetworkList(w http.ResponseWriter, r *http.Request) { // @Summary Create a new Fabric network // @Description Create a new Hyperledger Fabric network with the specified configuration -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param request body CreateFabricNetworkRequest true "Network creation request" @@ -222,7 +230,7 @@ func (h *Handler) FabricNetworkCreate(w http.ResponseWriter, r *http.Request) { // @Summary Join peer to Fabric network // @Description Join a peer node to an existing Fabric network -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -259,7 +267,7 @@ func (h *Handler) FabricNetworkJoinPeer(w http.ResponseWriter, r *http.Request) // @Summary Join orderer to Fabric network // @Description Join an orderer node to an existing Fabric network -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -296,7 +304,7 @@ func (h *Handler) FabricNetworkJoinOrderer(w http.ResponseWriter, r *http.Reques // @Summary Remove peer from Fabric network // @Description Remove a peer node from an existing Fabric network -// @Tags fabric-networks +// @Tags Fabric 
Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -332,7 +340,7 @@ func (h *Handler) FabricNetworkRemovePeer(w http.ResponseWriter, r *http.Request // @Summary Remove orderer from Fabric network // @Description Remove an orderer node from an existing Fabric network -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -368,7 +376,7 @@ func (h *Handler) FabricNetworkRemoveOrderer(w http.ResponseWriter, r *http.Requ // @Summary Get Fabric network channel configuration // @Description Retrieve the channel configuration for a Fabric network -// @Tags fabric-networks +// @Tags Fabric Networks // @Produce json // @Param id path int true "Network ID" // @Success 200 {object} ChannelConfigResponse @@ -401,7 +409,7 @@ func (h *Handler) FabricNetworkGetChannelConfig(w http.ResponseWriter, r *http.R // @Summary Get Fabric network current channel configuration // @Description Retrieve the current channel configuration for a Fabric network -// @Tags fabric-networks +// @Tags Fabric Networks // @Produce json // @Param id path int true "Network ID" // @Success 200 {object} ChannelConfigResponse @@ -434,7 +442,7 @@ func (h *Handler) FabricNetworkGetCurrentChannelConfig(w http.ResponseWriter, r // @Summary Delete a Fabric network // @Description Delete an existing Fabric network and all its resources -// @Tags fabric-networks +// @Tags Fabric Networks // @Produce json // @Param id path int true "Network ID" // @Success 204 "No Content" @@ -459,7 +467,7 @@ func (h *Handler) FabricNetworkDelete(w http.ResponseWriter, r *http.Request) { // @Summary Get a Fabric network by ID // @Description Get details of a specific Fabric network -// @Tags fabric-networks +// @Tags Fabric Networks // @Produce json // @Param id path int true "Network ID" // @Success 200 {object} NetworkResponse @@ -498,7 +506,7 @@ func (h *Handler) FabricNetworkGet(w http.ResponseWriter, r *http.Request) { // 
@Summary Get network nodes // @Description Get all nodes associated with a network -// @Tags fabric-networks +// @Tags Fabric Networks // @Produce json // @Param id path int true "Network ID" // @Success 200 {object} GetNetworkNodesResponse @@ -531,7 +539,7 @@ func (h *Handler) FabricNetworkGetNodes(w http.ResponseWriter, r *http.Request) // @Summary Add node to network // @Description Add a node (peer or orderer) to an existing network -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -573,7 +581,7 @@ func (h *Handler) FabricNetworkAddNode(w http.ResponseWriter, r *http.Request) { // @Summary Unjoin peer from Fabric network // @Description Remove a peer node from a channel but keep it in the network -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -609,7 +617,7 @@ func (h *Handler) FabricNetworkUnjoinPeer(w http.ResponseWriter, r *http.Request // @Summary Unjoin orderer from Fabric network // @Description Remove an orderer node from a channel but keep it in the network -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -645,7 +653,7 @@ func (h *Handler) FabricNetworkUnjoinOrderer(w http.ResponseWriter, r *http.Requ // @Summary Set anchor peers for an organization // @Description Set the anchor peers for an organization in a Fabric network -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -693,7 +701,7 @@ func (h *Handler) FabricNetworkSetAnchorPeers(w http.ResponseWriter, r *http.Req // @Summary Get network configuration // @Description Get the network configuration as YAML -// @Tags fabric-networks +// @Tags Fabric Networks // @Produce text/yaml // @Param id path int true "Network ID" // @Param orgId path int true "Organization ID" @@ -733,7 +741,7 @@ func 
(h *Handler) FabricNetworkGetOrganizationConfig(w http.ResponseWriter, r *h // @Summary Get a Fabric network by slug // @Description Get details of a specific Fabric network using its slug -// @Tags fabric-networks +// @Tags Fabric Networks // @Produce json // @Param slug path string true "Network Slug" // @Success 200 {object} NetworkResponse @@ -806,7 +814,7 @@ func writeError(w http.ResponseWriter, code int, error string, message string) { // @Summary List Besu networks // @Description Get a paginated list of Besu networks -// @Tags besu-networks +// @Tags Besu Networks // @Produce json // @Param limit query int false "Number of items to return (default: 10)" // @Param offset query int false "Number of items to skip (default: 0)" @@ -860,11 +868,11 @@ func (h *Handler) BesuNetworkList(w http.ResponseWriter, r *http.Request) { // @Summary Create a new Besu network // @Description Create a new Besu network with the specified configuration -// @Tags besu-networks +// @Tags Besu Networks // @Accept json // @Produce json // @Param request body CreateBesuNetworkRequest true "Network creation request" -// @Success 201 {object} BesuNetworkResponse +// @Success 200 {object} BesuNetworkResponse // @Failure 400 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /networks/besu [post] @@ -879,7 +887,7 @@ func (h *Handler) BesuNetworkCreate(w http.ResponseWriter, r *http.Request) { writeError(w, http.StatusBadRequest, "validation_failed", err.Error()) return } - // writeError(w, http.StatusInternalServerError, "not_implemented", "Creating Besu network is not implemented yet") + // Create the Besu network config besuConfig := types.BesuNetworkConfig{ ChainID: req.Config.ChainID, @@ -916,12 +924,20 @@ func (h *Handler) BesuNetworkCreate(w http.ResponseWriter, r *http.Request) { // Return network response resp := mapBesuNetworkToResponse(*network) - writeJSON(w, http.StatusCreated, resp) + + // Ensure proper JSON response + w.Header().Set("Content-Type", 
"application/json") + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(resp); err != nil { + // If encoding fails, log the error and return a generic error + writeError(w, http.StatusInternalServerError, "response_encoding_failed", "Failed to encode response") + return + } } // @Summary Get a Besu network by ID // @Description Get details of a specific Besu network -// @Tags besu-networks +// @Tags Besu Networks // @Produce json // @Param id path int true "Network ID" // @Success 200 {object} BesuNetworkResponse @@ -952,7 +968,7 @@ func (h *Handler) BesuNetworkGet(w http.ResponseWriter, r *http.Request) { // @Summary Delete a Besu network // @Description Delete an existing Besu network and all its resources -// @Tags besu-networks +// @Tags Besu Networks // @Produce json // @Param id path int true "Network ID" // @Success 204 "No Content" @@ -988,10 +1004,8 @@ func mapBesuNetworkToResponse(n service.Network) BesuNetworkResponse { if err := json.Unmarshal(n.Config, &config); err == nil { chainID = config.ChainID } - } - var genesisConfig json.RawMessage - if n.GenesisBlock != "" { - genesisConfig = json.RawMessage(n.GenesisBlock) + } else { + chainID = 0 } return BesuNetworkResponse{ ID: n.ID, @@ -1002,14 +1016,14 @@ func mapBesuNetworkToResponse(n service.Network) BesuNetworkResponse { CreatedAt: n.CreatedAt.Format(time.RFC3339), UpdatedAt: updatedAt, Config: n.Config, - GenesisConfig: genesisConfig, + GenesisConfig: n.GenesisBlock, Platform: n.Platform, } } // @Summary Reload network config block // @Description Reloads the current config block for a network -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -1052,7 +1066,7 @@ func (h *Handler) ReloadNetworkBlock(w http.ResponseWriter, r *http.Request) { // @Summary Import a Fabric network // @Description Import an existing Fabric network using its genesis block -// @Tags fabric-networks +// @Tags Fabric Networks // 
@Accept json // @Produce json // @Param request body ImportFabricNetworkRequest true "Import network request" @@ -1099,7 +1113,7 @@ func (h *Handler) ImportFabricNetwork(w http.ResponseWriter, r *http.Request) { // @Summary Import a Fabric network with organization // @Description Import an existing Fabric network using organization details -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param request body ImportFabricNetworkWithOrgRequest true "Import network with org request" @@ -1148,7 +1162,7 @@ func (h *Handler) ImportFabricNetworkWithOrg(w http.ResponseWriter, r *http.Requ // @Summary Import a Besu network // @Description Import an existing Besu network using its genesis file -// @Tags besu-networks +// @Tags Besu Networks // @Accept json // @Produce json // @Param request body ImportBesuNetworkRequest true "Import network request" @@ -1387,14 +1401,14 @@ type UpdateBatchTimeoutPayload struct { Timeout string `json:"timeout" validate:"required"` // e.g., "2s" } -// PrepareConfigUpdateRequest represents a request to prepare a config update -type PrepareConfigUpdateRequest struct { +// UpdateFabricNetworkRequest represents a request to update a Fabric network +type UpdateFabricNetworkRequest struct { Operations []ConfigUpdateOperationRequest `json:"operations" validate:"required,min=1,dive"` } // @Summary Submit config update proposal // @Description Submit a signed config update proposal for execution -// @Tags fabric-networks +// @Tags Fabric Networks // @Accept json // @Produce json // @Param id path int true "Network ID" @@ -1413,3 +1427,451 @@ type PrepareConfigUpdateRequest struct { func (h *Handler) DummyHandler(w http.ResponseWriter, r *http.Request) { writeError(w, http.StatusBadRequest, "dummy_error", "Dummy error") } + +// @Summary Prepare a config update for a Fabric network +// @Description Prepare a config update proposal for a Fabric network using the provided operations. 
+// @Description The following operation types are supported: +// @Description - add_org: Add a new organization to the channel +// @Description - remove_org: Remove an organization from the channel +// @Description - update_org_msp: Update an organization's MSP configuration +// @Description - set_anchor_peers: Set anchor peers for an organization +// @Description - add_consenter: Add a new consenter to the orderer +// @Description - remove_consenter: Remove a consenter from the orderer +// @Description - update_consenter: Update a consenter in the orderer +// @Description - update_etcd_raft_options: Update etcd raft options for the orderer +// @Description - update_batch_size: Update batch size for the orderer +// @Description - update_batch_timeout: Update batch timeout for the orderer +// @Tags Fabric Networks +// @Accept json +// @Produce json +// @Param id path int true "Network ID" +// @Param request body UpdateFabricNetworkRequest true "Config update operations" +// @Success 200 {object} ConfigUpdateResponse +// @Failure 400 {object} ErrorResponse +// @Failure 500 {object} ErrorResponse +// @Router /networks/fabric/{id}/update-config [post] +func (h *Handler) FabricUpdateChannelConfig(w http.ResponseWriter, r *http.Request) { + // Parse network ID from URL + networkID, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_network_id", "Invalid network ID") + return + } + + // Parse request body + var req UpdateFabricNetworkRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "Invalid request body") + return + } + + // Validate request + if err := h.validate.Struct(req); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", err.Error()) + return + } + + // Validate each operation's payload + for i, op := range req.Operations { + switch op.Type { + case "add_org": + var payload AddOrgPayload + if err 
:= json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "remove_org": + var payload RemoveOrgPayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "update_org_msp": + var payload UpdateOrgMSPPayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "set_anchor_peers": + var payload SetAnchorPeersPayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "add_consenter": + var payload AddConsenterPayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, 
err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "remove_consenter": + var payload RemoveConsenterPayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "update_consenter": + var payload UpdateConsenterPayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "update_etcd_raft_options": + var payload UpdateEtcdRaftOptionsPayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "update_batch_size": + var payload UpdateBatchSizePayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", 
fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + case "update_batch_timeout": + var payload UpdateBatchTimeoutPayload + if err := json.Unmarshal(op.Payload, &payload); err != nil { + writeError(w, http.StatusBadRequest, "invalid_payload", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + if err := h.validate.Struct(payload); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid payload for operation %d: %s", i, err.Error())) + return + } + // Validate that the timeout is a valid duration + if _, err := time.ParseDuration(payload.Timeout); err != nil { + writeError(w, http.StatusBadRequest, "validation_error", fmt.Sprintf("Invalid timeout for operation %d: %s", i, err.Error())) + return + } + default: + writeError(w, http.StatusBadRequest, "invalid_operation_type", fmt.Sprintf("Unsupported operation type: %s", op.Type)) + return + } + } + + // Convert operations to fabric.ConfigUpdateOperation + operations := make([]fabric.ConfigUpdateOperation, len(req.Operations)) + for i, op := range req.Operations { + operations[i] = fabric.ConfigUpdateOperation{ + Type: fabric.ConfigUpdateOperationType(op.Type), + Payload: op.Payload, + } + } + + // Call service to prepare config update + proposal, err := h.networkService.UpdateFabricNetwork(r.Context(), networkID, operations) + if err != nil { + writeError(w, http.StatusInternalServerError, "prepare_config_update_failed", err.Error()) + return + } + + // Create response + resp := ConfigUpdateResponse{ + ID: proposal.ID, + NetworkID: proposal.NetworkID, + ChannelName: proposal.ChannelName, + Status: proposal.Status, + CreatedAt: proposal.CreatedAt, + CreatedBy: proposal.CreatedBy, + Operations: req.Operations, + } + + // Return response + writeJSON(w, http.StatusOK, resp) +} + +// ConfigUpdateResponse represents the response from preparing a config update +type ConfigUpdateResponse struct { + ID string `json:"id"` + 
NetworkID int64 `json:"network_id"` + ChannelName string `json:"channel_name"` + Status string `json:"status"` + CreatedAt time.Time `json:"created_at"` + CreatedBy string `json:"created_by"` + Operations []ConfigUpdateOperationRequest `json:"operations"` + PreviewJSON string `json:"preview_json,omitempty"` +} + +// @Summary Get list of blocks from Fabric network +// @Description Get a paginated list of blocks from a Fabric network +// @Tags Fabric Networks +// @Produce json +// @Param id path int true "Network ID" +// @Param limit query int false "Number of blocks to return (default: 10)" +// @Param offset query int false "Number of blocks to skip (default: 0)" +// @Param reverse query bool false "Get blocks in reverse order (default: false)" +// @Success 200 {object} BlockListResponse +// @Failure 400 {object} ErrorResponse +// @Failure 404 {object} ErrorResponse +// @Failure 500 {object} ErrorResponse +// @Router /networks/fabric/{id}/blocks [get] +func (h *Handler) FabricGetBlocks(w http.ResponseWriter, r *http.Request) { + networkID, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_network_id", "Invalid network ID") + return + } + + // Parse pagination parameters + limit := int32(10) // Default limit + offset := int32(0) // Default offset + reverse := false // Default order + + if limitStr := r.URL.Query().Get("limit"); limitStr != "" { + limitInt, err := strconv.ParseInt(limitStr, 10, 32) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_limit", "Invalid limit parameter") + return + } + limit = int32(limitInt) + } + + if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" { + offsetInt, err := strconv.ParseInt(offsetStr, 10, 32) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_offset", "Invalid offset parameter") + return + } + offset = int32(offsetInt) + } + + if reverseStr := r.URL.Query().Get("reverse"); reverseStr != "" { + reverseBool, err := 
strconv.ParseBool(reverseStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_reverse", "Invalid reverse parameter") + return + } + reverse = reverseBool + } + + blocks, total, err := h.networkService.GetBlocks(r.Context(), networkID, limit, offset, reverse) + if err != nil { + writeError(w, http.StatusInternalServerError, "get_blocks_failed", err.Error()) + return + } + + resp := BlockListResponse{ + Blocks: blocks, + Total: total, + } + writeJSON(w, http.StatusOK, resp) +} + +// @Summary Get transactions from a specific block +// @Description Get all transactions from a specific block in a Fabric network +// @Tags Fabric Networks +// @Produce json +// @Param id path int true "Network ID" +// @Param blockNum path int true "Block Number" +// @Success 200 {object} BlockTransactionsResponse +// @Failure 400 {object} ErrorResponse +// @Failure 404 {object} ErrorResponse +// @Failure 500 {object} ErrorResponse +// @Router /networks/fabric/{id}/blocks/{blockNum} [get] +func (h *Handler) FabricGetBlock(w http.ResponseWriter, r *http.Request) { + networkID, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_network_id", "Invalid network ID") + return + } + + blockNum, err := strconv.ParseUint(chi.URLParam(r, "blockNum"), 10, 64) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_block_number", "Invalid block number") + return + } + + blck, err := h.networkService.GetBlockTransactions(r.Context(), networkID, blockNum) + if err != nil { + if err.Error() == "block not found" { + writeError(w, http.StatusNotFound, "block_not_found", "Block not found") + return + } + writeError(w, http.StatusInternalServerError, "get_transactions_failed", err.Error()) + return + } + + resp := BlockTransactionsResponse{ + Block: &blck.Block, + Transactions: blck.Transactions, + } + writeJSON(w, http.StatusOK, resp) +} + +// @Summary Get transaction details by transaction ID +// @Description 
Get detailed information about a specific transaction in a Fabric network +// @Tags Fabric Networks +// @Produce json +// @Param id path int true "Network ID" +// @Param txId path string true "Transaction ID" +// @Success 200 {object} TransactionResponse +// @Failure 400 {object} ErrorResponse +// @Failure 404 {object} ErrorResponse +// @Failure 500 {object} ErrorResponse +// @Router /networks/fabric/{id}/transactions/{txId} [get] +func (h *Handler) FabricGetTransaction(w http.ResponseWriter, r *http.Request) { + networkID, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_network_id", "Invalid network ID") + return + } + + txID := chi.URLParam(r, "txId") + if txID == "" { + writeError(w, http.StatusBadRequest, "invalid_transaction_id", "Invalid transaction ID") + return + } + + transaction, err := h.networkService.GetTransaction(r.Context(), networkID, txID) + if err != nil { + if err.Error() == "transaction not found" { + writeError(w, http.StatusNotFound, "transaction_not_found", "Transaction not found") + return + } + writeError(w, http.StatusInternalServerError, "get_transaction_failed", err.Error()) + return + } + + resp := TransactionResponse{ + Transaction: transaction, + } + writeJSON(w, http.StatusOK, resp) +} + +// UpdateOrganizationCRLRequest represents the request to update an organization's CRL +type UpdateOrganizationCRLRequest struct { + OrganizationID int64 `json:"organizationId" validate:"required"` +} + +// UpdateOrganizationCRLResponse represents the response from updating an organization's CRL +type UpdateOrganizationCRLResponse struct { + TransactionID string `json:"transactionId"` +} + +// @Summary Update organization CRL +// @Description Update the Certificate Revocation List (CRL) for an organization in the network +// @Tags Fabric Networks +// @Accept json +// @Produce json +// @Param id path int true "Network ID" +// @Param request body UpdateOrganizationCRLRequest 
true "Organization CRL update request" +// @Success 200 {object} UpdateOrganizationCRLResponse +// @Failure 400 {object} ErrorResponse +// @Failure 404 {object} ErrorResponse +// @Failure 500 {object} ErrorResponse +// @Router /networks/fabric/{id}/organization-crl [post] +func (h *Handler) UpdateOrganizationCRL(w http.ResponseWriter, r *http.Request) { + // Parse network ID from URL + networkID, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_network_id", "Invalid network ID") + return + } + + // Parse request body + var req UpdateOrganizationCRLRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "Invalid request body") + return + } + + // Validate request + if err := h.validate.Struct(req); err != nil { + writeError(w, http.StatusBadRequest, "validation_failed", err.Error()) + return + } + + // Update CRL using network service + txID, err := h.networkService.UpdateOrganizationCRL(r.Context(), networkID, req.OrganizationID) + if err != nil { + if err.Error() == "organization not found" { + writeError(w, http.StatusNotFound, "org_not_found", "Organization not found") + return + } + writeError(w, http.StatusInternalServerError, "update_crl_failed", err.Error()) + return + } + + // Return response + resp := UpdateOrganizationCRLResponse{ + TransactionID: txID, + } + writeJSON(w, http.StatusOK, resp) +} + +// @Summary Get Fabric chain information +// @Description Retrieve detailed information about the Fabric blockchain including height and block hashes +// @Tags Fabric Networks +// @Accept json +// @Produce json +// @Param id path int true "Network ID" +// @Success 200 {object} ChainInfoResponse +// @Failure 400 {object} ErrorResponse +// @Failure 404 {object} ErrorResponse +// @Failure 500 {object} ErrorResponse +// @Router /networks/fabric/{id}/info [get] +func (h *Handler) GetChainInfo(w http.ResponseWriter, r 
*http.Request) { + // Parse network ID from URL + networkID, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_network_id", "Invalid network ID") + return + } + + // Get chain info from service layer + chainInfo, err := h.networkService.GetChainInfo(r.Context(), networkID) + if err != nil { + writeError(w, http.StatusInternalServerError, "get_chain_info_failed", err.Error()) + return + } + + // Return response + resp := ChainInfoResponse{ + Height: chainInfo.Height, + CurrentBlockHash: chainInfo.CurrentBlockHash, + PreviousBlockHash: chainInfo.PreviousBlockHash, + } + writeJSON(w, http.StatusOK, resp) +} + +type ChainInfoResponse struct { + Height uint64 `json:"height"` + CurrentBlockHash string `json:"currentBlockHash"` + PreviousBlockHash string `json:"previousBlockHash"` +} diff --git a/pkg/networks/http/types.go b/pkg/networks/http/types.go index ae4113a..4b7c0fa 100644 --- a/pkg/networks/http/types.go +++ b/pkg/networks/http/types.go @@ -138,3 +138,20 @@ type ImportBesuNetworkRequest struct { Description string `json:"description"` ChainID int64 `json:"chainId" validate:"required"` } + +// BlockListResponse represents the response for listing blocks +type BlockListResponse struct { + Blocks []networksservice.Block `json:"blocks"` + Total int64 `json:"total"` +} + +// BlockTransactionsResponse represents the response for listing transactions in a block +type BlockTransactionsResponse struct { + Block *networksservice.Block `json:"block"` + Transactions []networksservice.Transaction `json:"transactions"` +} + +// TransactionResponse represents the response for getting a single transaction +type TransactionResponse struct { + Transaction networksservice.Transaction `json:"transaction"` +} diff --git a/pkg/networks/service/besu/deployer.go b/pkg/networks/service/besu/deployer.go index ffd052f..0a9d6be 100644 --- a/pkg/networks/service/besu/deployer.go +++ b/pkg/networks/service/besu/deployer.go 
@@ -139,7 +139,7 @@ func (d *BesuDeployer) CreateGenesisBlock(networkID int64, config interface{}) ( } // Update network with genesis block - _, err = d.db.UpdateNetworkGenesisBlock(context.Background(), db.UpdateNetworkGenesisBlockParams{ + _, err = d.db.UpdateNetworkGenesisBlock(context.Background(), &db.UpdateNetworkGenesisBlockParams{ ID: networkID, GenesisBlockB64: sql.NullString{ String: string(genesisJSON), @@ -152,7 +152,7 @@ func (d *BesuDeployer) CreateGenesisBlock(networkID int64, config interface{}) ( // Create network nodes for _, validator := range validators { - _, err = d.db.CreateNetworkNode(ctx, db.CreateNetworkNodeParams{ + _, err = d.db.CreateNetworkNode(ctx, &db.CreateNetworkNodeParams{ NetworkID: networkID, NodeID: validator.ID, Status: "pending", @@ -192,7 +192,7 @@ func (d *BesuDeployer) createExtraData(validators []BesuNode) (string, error) { for i, validator := range validators { validatorAddresses[i] = common.HexToAddress(validator.Address) } - + d.logger.Info("validatorAddresses: %v", validatorAddresses) // First, RLP encode the main components rlpList := []interface{}{ make([]byte, EXTRA_VANITY_LENGTH), // 32 bytes of zeros @@ -287,7 +287,7 @@ func (d *BesuDeployer) ImportNetwork(ctx context.Context, genesisFile []byte, na networkID := uuid.New().String() // Create network in database - _, err := d.db.CreateNetworkFull(ctx, db.CreateNetworkFullParams{ + _, err := d.db.CreateNetworkFull(ctx, &db.CreateNetworkFullParams{ Name: name, Platform: "besu", Description: sql.NullString{String: description, Valid: description != ""}, diff --git a/pkg/networks/service/fabric/deployer.go b/pkg/networks/service/fabric/deployer.go index 89f667b..7deccf8 100644 --- a/pkg/networks/service/fabric/deployer.go +++ b/pkg/networks/service/fabric/deployer.go @@ -7,35 +7,43 @@ import ( "encoding/base64" "encoding/json" "encoding/pem" + "errors" "fmt" "net" "strconv" "strings" "time" + "github.com/golang/protobuf/ptypes" + "bytes" "text/template" 
"github.com/Masterminds/sprig/v3" + "github.com/chainlaunch/chainlaunch/internal/protoutil" "github.com/chainlaunch/chainlaunch/pkg/certutils" "github.com/chainlaunch/chainlaunch/pkg/db" "github.com/chainlaunch/chainlaunch/pkg/fabric/channel" orgservicefabric "github.com/chainlaunch/chainlaunch/pkg/fabric/service" keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service" "github.com/chainlaunch/chainlaunch/pkg/logger" + "github.com/chainlaunch/chainlaunch/pkg/networks/service/fabric/org" fabricorg "github.com/chainlaunch/chainlaunch/pkg/networks/service/fabric/org" "github.com/chainlaunch/chainlaunch/pkg/networks/service/types" nodeservice "github.com/chainlaunch/chainlaunch/pkg/nodes/service" nodetypes "github.com/chainlaunch/chainlaunch/pkg/nodes/types" "github.com/golang/protobuf/proto" "github.com/google/uuid" + "github.com/hyperledger/fabric-admin-sdk/pkg/network" "github.com/hyperledger/fabric-config/configtx" + "github.com/hyperledger/fabric-config/configtx/membership" "github.com/hyperledger/fabric-config/configtx/orderer" + ordererapi "github.com/hyperledger/fabric-protos-go-apiv2/orderer" + "google.golang.org/grpc" + "github.com/hyperledger/fabric-config/protolator" - cb "github.com/hyperledger/fabric-protos-go/common" - "github.com/hyperledger/fabric-sdk-go/pkg/fab/resource" - "github.com/hyperledger/fabric/protoutil" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" ) // ConfigUpdateOperationType represents the type of configuration update operation @@ -134,10 +142,6 @@ func (op *AddOrgOperation) Modify(ctx context.Context, c *configtx.ConfigTx) err mspID := op.MSPID // Create a new organization in the application group - appOrg := c.Application().Organization(mspID) - if appOrg == nil { - return fmt.Errorf("failed to create application organization") - } var rootCerts []*x509.Certificate for _, rootCertStr := range op.RootCerts { @@ -156,13 +160,56 @@ func (op *AddOrgOperation) Modify(ctx context.Context, c 
*configtx.ConfigTx) err } tlsRootCerts = append(tlsRootCerts, tlsRootCert) } + signCACert := rootCerts[0] // Set MSP configuration - err := appOrg.SetMSP(configtx.MSP{ - Name: mspID, - RootCerts: rootCerts, - TLSRootCerts: tlsRootCerts, - Admins: []*x509.Certificate{}, + err := c.Application().SetOrganization(configtx.Organization{ + Name: mspID, + MSP: configtx.MSP{ + Name: mspID, + RootCerts: rootCerts, + TLSRootCerts: tlsRootCerts, + Admins: []*x509.Certificate{}, + NodeOUs: membership.NodeOUs{ + Enable: true, + ClientOUIdentifier: membership.OUIdentifier{ + Certificate: signCACert, + OrganizationalUnitIdentifier: "client", + }, + PeerOUIdentifier: membership.OUIdentifier{ + Certificate: signCACert, + OrganizationalUnitIdentifier: "peer", + }, + AdminOUIdentifier: membership.OUIdentifier{ + Certificate: signCACert, + OrganizationalUnitIdentifier: "admin", + }, + OrdererOUIdentifier: membership.OUIdentifier{ + Certificate: signCACert, + OrganizationalUnitIdentifier: "orderer", + }, + }, + }, + AnchorPeers: []configtx.Address{}, + OrdererEndpoints: []string{}, + Policies: map[string]configtx.Policy{ + "Admins": { + Type: "Signature", + Rule: fmt.Sprintf("OR('%s.admin')", mspID), + }, + "Readers": { + Type: "Signature", + Rule: fmt.Sprintf("OR('%s.member')", mspID), + }, + "Writers": { + Type: "Signature", + Rule: fmt.Sprintf("OR('%s.member')", mspID), + }, + "Endorsement": { + Type: "Signature", + Rule: fmt.Sprintf("OR('%s.member')", mspID), + }, + }, }) if err != nil { return fmt.Errorf("failed to set MSP configuration: %w", err) @@ -602,6 +649,73 @@ func CreateConfigModifier(operation ConfigUpdateOperation) (ConfigModifier, erro return modifier, nil } +// UpdateChannelConfig updates the channel configuration with the provided config update envelope and signatures +func (d *FabricDeployer) UpdateChannelConfig(ctx context.Context, networkID int64, configUpdateEnvelope []byte, signingOrgIDs []string, ordererAddress string, ordererTLSCert string) (string, error) { + 
// Get network details + network, err := d.db.GetNetwork(ctx, networkID) + if err != nil { + return "", fmt.Errorf("failed to get network: %w", err) + } + + // Unmarshal the config update envelope + envelope := &cb.Envelope{} + if err := proto.Unmarshal(configUpdateEnvelope, envelope); err != nil { + return "", fmt.Errorf("failed to unmarshal config update envelope: %w", err) + } + + // Collect signatures from the specified organizations + for _, orgID := range signingOrgIDs { + // Get organization details and MSP + orgService := org.NewOrganizationService(d.orgService, d.keyMgmt, d.logger, orgID, d.db) + + // Sign the config update + envelope, err = orgService.CreateConfigSignature(ctx, network.Name, envelope) + if err != nil { + return "", fmt.Errorf("failed to sign config update for org %s: %w", orgID, err) + } + } + + ordererConn, err := d.createOrdererConnection(ordererAddress, ordererTLSCert) + if err != nil { + return "", fmt.Errorf("failed to create orderer connection: %w", err) + } + defer ordererConn.Close() + ordererClient, err := ordererapi.NewAtomicBroadcastClient(ordererConn).Broadcast(context.Background()) + if err != nil { + return "", fmt.Errorf("failed to create orderer client: %w", err) + } + err = ordererClient.Send(envelope) + if err != nil { + return "", fmt.Errorf("failed to send envelope: %w", err) + } + response, err := ordererClient.Recv() + if err != nil { + return "", fmt.Errorf("failed to receive response: %w", err) + } + return response.String(), nil + +} + +// CreateOrdererConnection establishes a gRPC connection to an orderer +func (d *FabricDeployer) createOrdererConnection(ordererURL string, ordererTLSCACert string) (*grpc.ClientConn, error) { + d.logger.Info("Creating orderer connection", + "ordererURL", ordererURL) + + // Create a network node with the orderer details + networkNode := network.Node{ + Addr: ordererURL, + TLSCACertByte: []byte(ordererTLSCACert), + } + + // Establish connection to the orderer + ordererConn, err := 
network.DialConnection(networkNode) + if err != nil { + return nil, fmt.Errorf("failed to dial orderer connection: %w", err) + } + + return ordererConn, nil +} + // PrepareConfigUpdate prepares a config update for the given operations func (d *FabricDeployer) PrepareConfigUpdate(ctx context.Context, networkID int64, operations []ConfigUpdateOperation) (*ConfigUpdateProposal, error) { // Get network details @@ -622,7 +736,7 @@ func (d *FabricDeployer) PrepareConfigUpdate(ctx context.Context, networkID int6 return nil, fmt.Errorf("failed to unmarshal config block: %w", err) } - config, err := resource.ExtractConfigFromBlock(block) + config, err := ExtractConfigFromBlock(block) if err != nil { return nil, fmt.Errorf("failed to extract config from block: %w", err) } @@ -802,7 +916,7 @@ func (d *FabricDeployer) CreateGenesisBlock(networkID int64, config interface{}) if err != nil { return nil, fmt.Errorf("failed to get nodes by organization ID: %w", err) } - listCreateNetworkNodes := []db.CreateNetworkNodeParams{} + listCreateNetworkNodes := []*db.CreateNetworkNodeParams{} // Handle internal peer organizations for _, org := range fabricConfig.PeerOrganizations { @@ -840,7 +954,7 @@ func (d *FabricDeployer) CreateGenesisBlock(networkID int64, config interface{}) for _, node := range orgNodes { peerNodes = append(peerNodes, node) - listCreateNetworkNodes = append(listCreateNetworkNodes, db.CreateNetworkNodeParams{ + listCreateNetworkNodes = append(listCreateNetworkNodes, &db.CreateNetworkNodeParams{ NetworkID: networkID, NodeID: node.ID, Status: "pending", @@ -913,7 +1027,7 @@ func (d *FabricDeployer) CreateGenesisBlock(networkID int64, config interface{}) } if node.NodeType == nodetypes.NodeTypeFabricOrderer { ordererNodes = append(ordererNodes, node) - listCreateNetworkNodes = append(listCreateNetworkNodes, db.CreateNetworkNodeParams{ + listCreateNetworkNodes = append(listCreateNetworkNodes, &db.CreateNetworkNodeParams{ NetworkID: networkID, NodeID: node.ID, Status: 
"pending", @@ -994,7 +1108,7 @@ func (d *FabricDeployer) CreateGenesisBlock(networkID int64, config interface{}) } // Update network with genesis block - _, err = d.db.UpdateNetworkGenesisBlock(context.Background(), db.UpdateNetworkGenesisBlockParams{ + _, err = d.db.UpdateNetworkGenesisBlock(context.Background(), &db.UpdateNetworkGenesisBlockParams{ ID: networkID, GenesisBlockB64: sql.NullString{ String: channel.ConfigData, // Store base64 encoded genesis block @@ -1062,7 +1176,7 @@ func (d *FabricDeployer) JoinNode(networkId int64, genesisBlock []byte, nodeID i } // Update network node status to "joined" - _, err = d.db.UpdateNetworkNodeStatus(ctx, db.UpdateNetworkNodeStatusParams{ + _, err = d.db.UpdateNetworkNodeStatus(ctx, &db.UpdateNetworkNodeStatusParams{ NetworkID: network.ID, NodeID: nodeID, Status: "joined", @@ -1227,7 +1341,7 @@ func (d *FabricDeployer) RemoveNode(networkID int64, nodeID int64) error { } // Update network node status to "removed" - _, err = d.db.UpdateNetworkNodeStatus(ctx, db.UpdateNetworkNodeStatusParams{ + _, err = d.db.UpdateNetworkNodeStatus(ctx, &db.UpdateNetworkNodeStatusParams{ NetworkID: network.ID, NodeID: nodeID, Status: "removed", @@ -1284,7 +1398,7 @@ func (d *FabricDeployer) UnjoinNode(networkID int64, nodeID int64) error { } // Update network node status to "unjoined" - _, err = d.db.UpdateNetworkNodeStatus(ctx, db.UpdateNetworkNodeStatusParams{ + _, err = d.db.UpdateNetworkNodeStatus(ctx, &db.UpdateNetworkNodeStatusParams{ NetworkID: network.ID, NodeID: nodeID, Status: "unjoined", @@ -1296,6 +1410,126 @@ func (d *FabricDeployer) UnjoinNode(networkID int64, nodeID int64) error { return nil } +type UpdateOrganizationCRLInput struct { + OrganizationID int64 `json:"organizationId" validate:"required"` +} + +func (d *FabricDeployer) UpdateOrganizationCRL(ctx context.Context, networkID int64, input UpdateOrganizationCRLInput) (string, error) { + // Get network details + network, err := d.db.GetNetwork(ctx, networkID) + if err 
!= nil { + return "", fmt.Errorf("failed to get network: %w", err) + } + + // Get organization details + org, err := d.db.GetFabricOrganizationByID(ctx, input.OrganizationID) + if err != nil { + return "", fmt.Errorf("failed to get organization: %w", err) + } + + // Get the CRL from organization service + crl, err := d.orgService.GetCRL(ctx, input.OrganizationID) + if err != nil { + return "", fmt.Errorf("failed to get CRL: %w", err) + } + + // Get a peer from the organization to submit the update + nodes, err := d.nodes.GetFabricNodesByOrganization(ctx, input.OrganizationID) + if err != nil { + return "", fmt.Errorf("failed to get organization nodes: %w", err) + } + + var peer *nodeservice.NodeResponse + for _, node := range nodes { + if node.NodeType == nodetypes.NodeTypeFabricPeer { + peer = &node + break + } + } + if peer == nil { + return "", fmt.Errorf("no peer found for organization %d", input.OrganizationID) + } + + // Get an orderer node from the network + networkNodes, err := d.db.GetNetworkNodes(ctx, networkID) + if err != nil { + return "", fmt.Errorf("failed to get network nodes: %w", err) + } + + var orderer *db.GetNetworkNodesRow + for _, node := range networkNodes { + if node.NodeType.String == string(nodetypes.NodeTypeFabricOrderer) { + orderer = node + break + } + } + if orderer == nil { + return "", fmt.Errorf("no orderer found in network %d", networkID) + } + + // Get orderer TLS CA cert + ordererConfig := &nodetypes.FabricOrdererDeploymentConfig{} + if err := json.Unmarshal([]byte(orderer.DeploymentConfig.String), ordererConfig); err != nil { + return "", fmt.Errorf("failed to unmarshal orderer config: %w", err) + } + + ordererTLSKey, err := d.keyMgmt.GetKey(ctx, int(ordererConfig.TLSKeyID)) + if err != nil { + return "", fmt.Errorf("failed to get orderer TLS key: %w", err) + } + if ordererTLSKey.Certificate == nil { + return "", fmt.Errorf("orderer TLS certificate not found") + } + ordererURL := ordererConfig.GetAddress() + ordererCert := 
*ordererTLSKey.Certificate + + p, err := d.nodes.GetFabricPeer(ctx, peer.ID) + if err != nil { + return "", fmt.Errorf("failed to get fabric peer: %w", err) + } + channelConfig, err := p.GetChannelConfig(ctx, network.Name, ordererURL, ordererCert) + if err != nil { + return "", fmt.Errorf("failed to get channel config: %w", err) + } + + // Get channel name from network config + var fabricConfig types.FabricNetworkConfig + if err := json.Unmarshal([]byte(network.Config.String), &fabricConfig); err != nil { + return "", fmt.Errorf("failed to unmarshal network config: %w", err) + } + + // Generate channel update + channelUpdate, err := d.channelService.SetCRL(&channel.SetCRLInput{ + CurrentConfig: channelConfig.ChannelGroup, + ChannelName: fabricConfig.ChannelName, + MSPID: org.MspID, + CRL: crl, + }) + if err != nil { + return "", fmt.Errorf("failed to create anchor peer update: %w", err) + } + + // Get peer instance + fabricPeer, err := d.nodes.GetFabricPeer(ctx, peer.ID) + if err != nil { + return "", fmt.Errorf("failed to get fabric peer: %w", err) + } + + // Save channel config + resp, err := fabricPeer.SaveChannelConfig(ctx, + fabricConfig.ChannelName, + ordererURL, + *ordererTLSKey.Certificate, + channelUpdate, + ) + if err != nil { + return "", fmt.Errorf("failed to save channel config: %w", err) + } + + return resp.TransactionID, nil + +} + type CreateAnchorPeerUpdateInput struct { ChannelName string OrgMSPID string @@ -1342,7 +1576,7 @@ func (d *FabricDeployer) SetAnchorPeers(ctx context.Context, networkID int64, or var orderer *db.GetNetworkNodesRow for _, node := range networkNodes { if node.NodeType.String == string(nodetypes.NodeTypeFabricOrderer) { - orderer = &node + orderer = node break } } @@ -1363,7 +1597,7 @@ func (d *FabricDeployer) SetAnchorPeers(ctx context.Context, networkID int64, or if ordererTLSKey.Certificate == nil { return "", fmt.Errorf("orderer TLS certificate not found") } - ordererURL := ordererConfig.GetURL() + ordererURL := 
ordererConfig.GetAddress() ordererCert := *ordererTLSKey.Certificate p, err := d.nodes.GetFabricPeer(ctx, peer.ID) @@ -1412,7 +1646,7 @@ func (d *FabricDeployer) SetAnchorPeers(ctx context.Context, networkID int64, or fabricConfig.ChannelName, ordererURL, *ordererTLSKey.Certificate, - []byte(channelUpdate), + channelUpdate, ) if err != nil { return "", fmt.Errorf("failed to save channel config: %w", err) @@ -1659,69 +1893,106 @@ func (d *FabricDeployer) FetchCurrentChannelConfig(ctx context.Context, networkI if err != nil { return nil, fmt.Errorf("failed to get organization: %w", err) } - fabricOrgItem := fabricorg.NewOrganizationService(d.orgService, d.keyMgmt, d.logger, fabricOrg.MspID) + fabricOrgItem := fabricorg.NewOrganizationService(d.orgService, d.keyMgmt, d.logger, fabricOrg.MspID, d.db) - // First try to get orderer from active nodes - var ordererURL, ordererTLSCert string + // Get all available orderers - first from active nodes + var orderersList []struct { + address string + tlsCert string + } + + // First try to get orderers from active nodes for _, node := range networkNodes { if node.NodeType.String == string(nodetypes.NodeTypeFabricOrderer) && node.Status == "joined" { ordererNode, err := d.nodes.GetNodeByID(ctx, node.NodeID) if err != nil { + d.logger.Warn("Failed to get orderer node", "nodeID", node.NodeID, "error", err) continue } ordererConfig := ordererNode.FabricOrderer - ordererURL = fmt.Sprintf("grpcs://%s", ordererConfig.ExternalEndpoint) - // Get orderer TLS cert ordererTLSKey, err := d.keyMgmt.GetKey(ctx, int(ordererConfig.TLSKeyID)) if err != nil || ordererTLSKey.Certificate == nil { + d.logger.Warn("Failed to get orderer TLS cert", "nodeID", node.NodeID, "error", err) continue } - ordererTLSCert = *ordererTLSKey.Certificate - break + orderersList = append(orderersList, struct { + address string + tlsCert string + }{ + address: ordererConfig.ExternalEndpoint, + tlsCert: *ordererTLSKey.Certificate, + }) } } - // If no active orderer 
found, try to get from genesis block - if ordererURL == "" { + // If no active orderers found, try to get from genesis block + if len(orderersList) == 0 { orderers, err := d.GetOrderersFromGenesisBlock(ctx, networkID) if err != nil { return nil, fmt.Errorf("failed to get orderers from genesis block: %w", err) } - if len(orderers) > 0 { - ordererURL = orderers[0].URL - ordererTLSCert = orderers[0].TLSCert - } else { - return nil, fmt.Errorf("no orderers found in network or genesis block") + for _, orderer := range orderers { + // Remove the grpcs:// prefix if present + address := orderer.URL + if strings.HasPrefix(address, "grpcs://") { + address = strings.TrimPrefix(address, "grpcs://") + } + orderersList = append(orderersList, struct { + address string + tlsCert string + }{ + address: address, + tlsCert: orderer.TLSCert, + }) } } - // Fetch channel config from peer - channelConfig, err := fabricOrgItem.GetConfigBlockWithNetworkConfig(ctx, network.Name, ordererURL, ordererTLSCert) - if err != nil { - return nil, fmt.Errorf("failed to get channel config from peer: %w", err) + if len(orderersList) == 0 { + return nil, fmt.Errorf("no orderers found in network or genesis block") } - // Marshal the config block - configBytes, err := proto.Marshal(channelConfig) - if err != nil { - return nil, fmt.Errorf("failed to marshal config block: %w", err) - } + // Try each orderer until one succeeds + var lastErr error + for _, orderer := range orderersList { + d.logger.Info("Attempting to fetch channel config from orderer", "address", orderer.address) - // Update the current config block in the database - configBase64 := base64.StdEncoding.EncodeToString(configBytes) - err = d.db.UpdateNetworkCurrentConfigBlock(ctx, db.UpdateNetworkCurrentConfigBlockParams{ - ID: networkID, - CurrentConfigBlockB64: sql.NullString{ - String: configBase64, - Valid: true, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to update network current config block: %w", err) + // Fetch 
channel config from orderer + channelConfig, err := fabricOrgItem.GetConfigBlockWithNetworkConfig(ctx, network.Name, orderer.address, orderer.tlsCert) + if err != nil { + d.logger.Warn("Failed to get channel config from orderer", "address", orderer.address, "error", err) + lastErr = err + continue + } + + // Marshal the config block + configBytes, err := proto.Marshal(channelConfig) + if err != nil { + lastErr = fmt.Errorf("failed to marshal config block: %w", err) + continue + } + + // Update the current config block in the database + configBase64 := base64.StdEncoding.EncodeToString(configBytes) + err = d.db.UpdateNetworkCurrentConfigBlock(ctx, &db.UpdateNetworkCurrentConfigBlockParams{ + ID: networkID, + CurrentConfigBlockB64: sql.NullString{ + String: configBase64, + Valid: true, + }, + }) + if err != nil { + lastErr = fmt.Errorf("failed to update network current config block: %w", err) + continue + } + + // Successfully fetched and stored config + d.logger.Info("Successfully fetched channel config from orderer", "address", orderer.address) + return configBytes, nil } - return configBytes, nil + // If we get here, all orderers failed + return nil, fmt.Errorf("failed to fetch channel config from any orderer: %w", lastErr) } // GetOrdererInfoFromConfig extracts orderer information from a channel config @@ -1880,7 +2151,7 @@ func (d *FabricDeployer) GetOrderersFromConfigBlock(ctx context.Context, blockBy return nil, fmt.Errorf("failed to unmarshal block: %w", err) } - cmnConfig, err := resource.ExtractConfigFromBlock(block) + cmnConfig, err := ExtractConfigFromBlock(block) if err != nil { return nil, fmt.Errorf("failed to extract config from block: %w", err) } @@ -1934,7 +2205,7 @@ func (d *FabricDeployer) GetOrderersFromGenesisBlock(ctx context.Context, networ return nil, fmt.Errorf("failed to unmarshal genesis block: %w", err) } - cmnConfig, err := resource.ExtractConfigFromBlock(block) + cmnConfig, err := ExtractConfigFromBlock(block) if err != nil { return 
nil, fmt.Errorf("failed to extract config from block: %w", err) } @@ -1979,7 +2250,7 @@ func (d *FabricDeployer) ImportNetworkWithOrg(ctx context.Context, channelID str if err != nil { return "", fmt.Errorf("failed to get organization: %w", err) } - orgService := fabricorg.NewOrganizationService(d.orgService, d.keyMgmt, d.logger, org.MspID) + orgService := fabricorg.NewOrganizationService(d.orgService, d.keyMgmt, d.logger, org.MspID, d.db) // Validate TLS certificate block, _ := pem.Decode(ordererTLSCert) @@ -2016,7 +2287,7 @@ func (d *FabricDeployer) ImportNetworkWithOrg(ctx context.Context, channelID str } // Create network in database - _, err = d.db.CreateNetworkFull(ctx, db.CreateNetworkFullParams{ + _, err = d.db.CreateNetworkFull(ctx, &db.CreateNetworkFullParams{ Name: channelID, Platform: "fabric", Description: sql.NullString{String: description, Valid: description != ""}, @@ -2078,7 +2349,7 @@ func (d *FabricDeployer) ImportNetwork(ctx context.Context, genesisFile []byte, networkID := uuid.New().String() // Create network in database - _, err := d.db.CreateNetworkFull(ctx, db.CreateNetworkFullParams{ + _, err := d.db.CreateNetworkFull(ctx, &db.CreateNetworkFullParams{ Name: channelName, Platform: "fabric", Description: sql.NullString{String: description, Valid: description != ""}, @@ -2114,3 +2385,369 @@ func CreateConfigUpdateEnvelope(channelID string, configUpdate *cb.ConfigUpdate) } return envelopeData, nil } + +// ExtractConfigFromBlock extracts channel configuration from block +func ExtractConfigFromBlock(block *cb.Block) (*cb.Config, error) { + if block == nil || block.Data == nil || len(block.Data.Data) == 0 { + return nil, errors.New("invalid block") + } + blockPayload := block.Data.Data[0] + + envelope := &cb.Envelope{} + if err := proto.Unmarshal(blockPayload, envelope); err != nil { + return nil, err + } + payload := &cb.Payload{} + if err := proto.Unmarshal(envelope.Payload, payload); err != nil { + return nil, err + } + + cfgEnv := 
&cb.ConfigEnvelope{} + if err := proto.Unmarshal(payload.Data, cfgEnv); err != nil { + return nil, err + } + return cfgEnv.Config, nil +} + +type ChainInfo struct { + Height uint64 + CurrentBlockHash string + PreviousBlockHash string +} + +func (d *FabricDeployer) GetChainInfo(ctx context.Context, networkID int64) (*ChainInfo, error) { + // Get network details + network, err := d.db.GetNetwork(ctx, networkID) + if err != nil { + return nil, fmt.Errorf("failed to get network: %w", err) + } + + // Get a peer from the network + networkNodes, err := d.db.GetNetworkNodes(ctx, networkID) + if err != nil { + return nil, fmt.Errorf("failed to get network nodes: %w", err) + } + + var peerNode *db.GetNetworkNodesRow + for _, node := range networkNodes { + if node.Role == "peer" && node.Status == "joined" { + peerNode = node + break + } + } + + if peerNode == nil { + return nil, fmt.Errorf("no active peer found in network") + } + + // Get peer instance + peer, err := d.nodes.GetFabricPeer(ctx, peerNode.NodeID) + if err != nil { + return nil, fmt.Errorf("failed to get peer: %w", err) + } + + chainInfo, err := peer.GetChannelInfoOnPeer(ctx, network.Name) + if err != nil { + return nil, fmt.Errorf("failed to get chain info: %w", err) + } + + return &ChainInfo{ + Height: uint64(chainInfo.Height), + CurrentBlockHash: fmt.Sprintf("%x", chainInfo.CurrentBlockHash), + PreviousBlockHash: fmt.Sprintf("%x", chainInfo.PreviousBlockHash), + }, nil +} +func (d *FabricDeployer) GetBlocks(ctx context.Context, networkID int64, limit, offset int32, reverse bool) ([]Block, int64, error) { + // Get network details + network, err := d.db.GetNetwork(ctx, networkID) + if err != nil { + return nil, 0, fmt.Errorf("failed to get network: %w", err) + } + + // Get a peer from the network + networkNodes, err := d.db.GetNetworkNodes(ctx, networkID) + if err != nil { + return nil, 0, fmt.Errorf("failed to get network nodes: %w", err) + } + + var peerNode *db.GetNetworkNodesRow + for _, node := range 
networkNodes { + if node.Role == "peer" && node.Status == "joined" { + peerNode = node + break + } + } + + if peerNode == nil { + return nil, 0, fmt.Errorf("no active peer found in network") + } + + // Get peer instance + peer, err := d.nodes.GetFabricPeer(ctx, peerNode.NodeID) + if err != nil { + return nil, 0, fmt.Errorf("failed to get peer: %w", err) + } + + // Get channel info to get total blocks + channelInfo, err := peer.GetChannelBlockInfo(ctx, network.Name) + if err != nil { + return nil, 0, fmt.Errorf("failed to get channel info: %w", err) + } + + total := int64(channelInfo.Height) + // Calculate start and end blocks based on reverse flag + var startBlock, endBlock int64 + + // Check if offset is greater than channel height + if int64(offset) >= int64(channelInfo.Height) { + return []Block{}, total, nil // Return empty slice if offset exceeds height + } + + if reverse { + // If reverse is true, we start from the newest blocks + endBlock = int64(channelInfo.Height) - 1 - int64(offset) + if endBlock < 0 { + endBlock = 0 + } + // Calculate endBlock based on startBlock and limit + startBlock = endBlock - int64(limit) + if startBlock > endBlock { // Handle underflow + startBlock = 0 + } + if startBlock < 0 { + startBlock = 0 + } + + // Example: if height is 31, limit is 10, offset is 0 + // endBlock = 30, startBlock = 21 + } else { + // Normal order (oldest first) + startBlock = int64(offset) + endBlock = int64(offset + limit - 1) + if endBlock >= int64(channelInfo.Height) { + endBlock = int64(channelInfo.Height) - 1 + } + } + + // Get blocks in range + blocks, err := peer.GetBlocksInRange(ctx, network.Name, uint64(startBlock), uint64(endBlock)) + if err != nil { + return nil, 0, fmt.Errorf("failed to get blocks: %w", err) + } + + // Convert blocks to response type + result := make([]Block, len(blocks)) + for i, block := range blocks { + blck, err := d.MapBlock(block) + if err != nil { + return nil, 0, fmt.Errorf("failed to map block: %w", err) + } + result[i] 
= *blck + } + + return result, total, nil +} + +// GetBlocks retrieves blocks from a specific network +func (d *FabricDeployer) MapBlock(block *cb.Block) (*Block, error) { + timestamp := time.Now() + for _, txData := range block.Data.Data { + env := &cb.Envelope{} + err := proto.Unmarshal(txData, env) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal envelope: %w", err) + } + payload := &cb.Payload{} + err = proto.Unmarshal(env.Payload, payload) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal payload: %w", err) + } + chdr := &cb.ChannelHeader{} + err = proto.Unmarshal(payload.Header.ChannelHeader, chdr) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal channel header: %w", err) + } + txDate, err := ptypes.Timestamp(chdr.Timestamp) + if err != nil { + return nil, fmt.Errorf("failed to parse timestamp: %w", err) + } + timestamp = txDate + break + } + buffer := &bytes.Buffer{} + err := protolator.DeepMarshalJSON(buffer, block) + if err != nil { + return nil, fmt.Errorf("failed to marshal block data: %w", err) + } + blockDataJson := buffer.Bytes() + return &Block{ + Number: block.Header.Number, + Hash: fmt.Sprintf("%x", block.Header.DataHash), + PreviousHash: fmt.Sprintf("%x", block.Header.PreviousHash), + Timestamp: timestamp, + TxCount: len(block.Data.Data), + Data: blockDataJson, + }, nil +} + +type BlockWithTransactions struct { + Block Block + Transactions []Transaction +} + +// GetBlockTransactions retrieves all transactions from a specific block +func (d *FabricDeployer) GetBlockTransactions(ctx context.Context, networkID int64, blockNum uint64) (*BlockWithTransactions, error) { + // Get network details + network, err := d.db.GetNetwork(ctx, networkID) + if err != nil { + return nil, fmt.Errorf("failed to get network: %w", err) + } + + // Get a peer from the network + networkNodes, err := d.db.GetNetworkNodes(ctx, networkID) + if err != nil { + return nil, fmt.Errorf("failed to get network nodes: %w", err) + } + + var 
peerNode *db.GetNetworkNodesRow + for _, node := range networkNodes { + if node.Role == "peer" && node.Status == "joined" { + peerNode = node + break + } + } + + if peerNode == nil { + return nil, fmt.Errorf("no active peer found in network") + } + + // Get peer instance + peer, err := d.nodes.GetFabricPeer(ctx, peerNode.NodeID) + if err != nil { + return nil, fmt.Errorf("failed to get peer: %w", err) + } + + // Get transactions from block + block, err := peer.GetBlock(ctx, network.Name, blockNum) + if err != nil { + return nil, fmt.Errorf("failed to get block: %w", err) + } + envelopes, err := peer.GetBlockTransactions(ctx, network.Name, blockNum) + if err != nil { + return nil, fmt.Errorf("failed to get block transactions: %w", err) + } + + // Convert envelopes to transactions + transactions := make([]Transaction, len(envelopes)) + for i, env := range envelopes { + payload := &cb.Payload{} + if err := proto.Unmarshal(env.Payload, payload); err != nil { + continue + } + + chdr := &cb.ChannelHeader{} + if err := proto.Unmarshal(payload.Header.ChannelHeader, chdr); err != nil { + continue + } + + shdr := &cb.SignatureHeader{} + if err := proto.Unmarshal(payload.Header.SignatureHeader, shdr); err != nil { + continue + } + + transactions[i] = Transaction{ + ID: chdr.TxId, + BlockNum: blockNum, + Timestamp: time.Unix(chdr.Timestamp.Seconds, int64(chdr.Timestamp.Nanos)), + Type: cb.HeaderType_name[int32(chdr.Type)], + Creator: string(shdr.Creator), + Status: "success", + } + } + blck, err := d.MapBlock(block) + if err != nil { + return nil, fmt.Errorf("failed to map block: %w", err) + } + blockWithTransactions := &BlockWithTransactions{ + Block: *blck, + Transactions: transactions, + } + return blockWithTransactions, nil +} + +// GetTransaction retrieves a specific transaction by its ID +func (d *FabricDeployer) GetTransaction(ctx context.Context, networkID int64, txID string) (Transaction, error) { + // Get network details + network, err := d.db.GetNetwork(ctx, 
networkID) + if err != nil { + return Transaction{}, fmt.Errorf("failed to get network: %w", err) + } + + // Get a peer from the network + networkNodes, err := d.db.GetNetworkNodes(ctx, networkID) + if err != nil { + return Transaction{}, fmt.Errorf("failed to get network nodes: %w", err) + } + + var peerNode *db.GetNetworkNodesRow + for _, node := range networkNodes { + if node.Role == "peer" && node.Status == "joined" { + peerNode = node + break + } + } + + if peerNode == nil { + return Transaction{}, fmt.Errorf("no active peer found in network") + } + + // Get peer instance + peer, err := d.nodes.GetFabricPeer(ctx, peerNode.NodeID) + if err != nil { + return Transaction{}, fmt.Errorf("failed to get peer: %w", err) + } + + // Get channel info + channelInfo, err := peer.GetChannelBlockInfo(ctx, network.Name) + if err != nil { + return Transaction{}, fmt.Errorf("failed to get channel info: %w", err) + } + + // Search for transaction in blocks + for blockNum := uint64(0); blockNum < channelInfo.Height; blockNum++ { + transactions, err := peer.GetBlockTransactions(ctx, network.Name, blockNum) + if err != nil { + continue + } + + for _, tx := range transactions { + payload := &cb.Payload{} + if err := proto.Unmarshal(tx.Payload, payload); err != nil { + continue + } + + chdr := &cb.ChannelHeader{} + if err := proto.Unmarshal(payload.Header.ChannelHeader, chdr); err != nil { + continue + } + + if chdr.TxId == txID { + shdr := &cb.SignatureHeader{} + if err := proto.Unmarshal(payload.Header.SignatureHeader, shdr); err != nil { + continue + } + + return Transaction{ + ID: chdr.TxId, + BlockNum: blockNum, + Timestamp: time.Unix(chdr.Timestamp.Seconds, int64(chdr.Timestamp.Nanos)), + Type: cb.HeaderType_name[int32(chdr.Type)], + Creator: string(shdr.Creator), + Status: "success", + }, nil + } + } + } + + return Transaction{}, fmt.Errorf("transaction not found") +} diff --git a/pkg/networks/service/fabric/org/org.go b/pkg/networks/service/fabric/org/org.go index 
d55e8ac..7d22bed 100644 --- a/pkg/networks/service/fabric/org/org.go +++ b/pkg/networks/service/fabric/org/org.go @@ -1,191 +1,42 @@ package org import ( - "bytes" "context" + "crypto" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "database/sql" + "encoding/asn1" + "encoding/pem" + "errors" "fmt" + "math/big" "strings" - "text/template" + "time" - "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" + "github.com/chainlaunch/chainlaunch/internal/protoutil" + "github.com/chainlaunch/chainlaunch/pkg/db" "github.com/chainlaunch/chainlaunch/pkg/fabric/service" keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service" "github.com/chainlaunch/chainlaunch/pkg/logger" - "github.com/hyperledger/fabric-protos-go/common" - "github.com/hyperledger/fabric-sdk-go/pkg/client/resmgmt" - "github.com/hyperledger/fabric-sdk-go/pkg/common/providers/msp" - "github.com/hyperledger/fabric-sdk-go/pkg/core/config" - "github.com/hyperledger/fabric-sdk-go/pkg/core/cryptosuite" - "github.com/hyperledger/fabric-sdk-go/pkg/core/cryptosuite/bccsp/sw" - "github.com/hyperledger/fabric-sdk-go/pkg/fab" - "github.com/hyperledger/fabric-sdk-go/pkg/fabsdk" - mspimpl "github.com/hyperledger/fabric-sdk-go/pkg/msp" + "github.com/hyperledger/fabric-admin-sdk/pkg/channel" + "github.com/hyperledger/fabric-admin-sdk/pkg/identity" + "github.com/hyperledger/fabric-admin-sdk/pkg/network" + gwidentity "github.com/hyperledger/fabric-gateway/pkg/identity" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" ) -const tmplGoConfig = ` -name: hlf-network -version: 1.0.0 -client: - organization: "{{ .Organization }}" -{{- if not .Organizations }} -organizations: {} -{{- else }} -organizations: - {{ range $org := .Organizations }} - {{ $org.MSPID }}: - mspid: {{ $org.MSPID }} - cryptoPath: /tmp/cryptopath -{{ if not $org.Users }} - users: {} -{{- else }} - users: - {{- range $user := $org.Users }} - {{ $user.Name }}: - cert: 
- pem: | -{{ $user.Cert | indent 12 }} - key: - pem: | -{{ $user.Key | indent 12 }} -{{- end }} -{{- end }} -{{- if not $org.CertAuths }} - certificateAuthorities: [] -{{- else }} - certificateAuthorities: - {{- range $ca := $org.CertAuths }} - - {{ $ca.Name }} - {{- end }} -{{- end }} -{{- if not $org.Peers }} - peers: [] -{{- else }} - peers: - {{- range $peer := $org.Peers }} - - {{ $peer }} - {{- end }} -{{- end }} -{{- if not $org.Orderers }} - orderers: [] -{{- else }} - orderers: - {{- range $orderer := $org.Orderers }} - - {{ $orderer }} - {{- end }} - - {{- end }} -{{- end }} -{{- end }} - -{{- if not .Orderers }} -{{- else }} -orderers: -{{- range $orderer := .Orderers }} - {{$orderer.Name}}: - url: {{ $orderer.URL }} - grpcOptions: - allow-insecure: false - tlsCACerts: - pem: | -{{ $orderer.TLSCACert | indent 8 }} -{{- end }} -{{- end }} - -{{- if not .Peers }} -{{- else }} -peers: - {{- range $peer := .Peers }} - {{$peer.Name}}: - url: {{ $peer.URL }} - tlsCACerts: - pem: | -{{ $peer.TLSCACert | indent 8 }} -{{- end }} -{{- end }} - -{{- if not .CertAuths }} -{{- else }} -certificateAuthorities: -{{- range $ca := .CertAuths }} - {{ $ca.Name }}: - url: https://{{ $ca.URL }} -{{if $ca.EnrollID }} - registrar: - enrollId: {{ $ca.EnrollID }} - enrollSecret: "{{ $ca.EnrollSecret }}" -{{ end }} - caName: {{ $ca.CAName }} - tlsCACerts: - pem: - - | -{{ $ca.TLSCert | indent 12 }} - -{{- end }} -{{- end }} - -channels: - _default: -{{- if not .Orderers }} - orderers: [] -{{- else }} - orderers: -{{- range $orderer := .Orderers }} - - {{$orderer.Name}} -{{- end }} -{{- end }} -{{- if not .Peers }} - peers: {} -{{- else }} - peers: -{{- range $peer := .Peers }} - {{$peer.Name}}: - discover: true - endorsingPeer: true - chaincodeQuery: true - ledgerQuery: true - eventSource: true -{{- end }} -{{- end }} - -` - -type OrgUser struct { - Name string - Cert string - Key string -} -type Org struct { - MSPID string - CertAuths []string - Peers []string - Orderers 
[]string - Users []OrgUser -} -type Peer struct { - Name string - URL string - TLSCACert string -} -type CA struct { - Name string - URL string - TLSCert string - EnrollID string - EnrollSecret string -} - -type Orderer struct { - URL string - Name string - TLSCACert string -} - type FabricOrg struct { orgService *service.OrganizationService keyMgmtService *keymanagement.KeyManagementService logger *logger.Logger mspID string + queries *db.Queries } func NewOrganizationService( @@ -193,243 +44,256 @@ func NewOrganizationService( keyMgmtService *keymanagement.KeyManagementService, logger *logger.Logger, mspID string, + queries *db.Queries, ) *FabricOrg { return &FabricOrg{ orgService: orgService, keyMgmtService: keyMgmtService, logger: logger, mspID: mspID, + queries: queries, } } -// GenerateNetworkConfig generates a network configuration for connecting to Fabric network -func (s *FabricOrg) GenerateNetworkConfig(ctx context.Context, channelID, ordererURL, ordererTLSCert string) (string, error) { - s.logger.Info("Generating network config", +// GetConfigBlockWithNetworkConfig retrieves a config block using a generated network config +func (s *FabricOrg) GetConfigBlockWithNetworkConfig(ctx context.Context, channelID, ordererURL, ordererTLSCert string) (*cb.Block, error) { + s.logger.Info("Fetching channel config with network config", "mspID", s.mspID, "channel", channelID, - "ordererUrl", ordererURL) - + "ordererUrl", ordererURL, + ) + ordererNode := network.Node{ + Addr: ordererURL, + TLSCACertByte: []byte(ordererTLSCert), + } + ordererConn, err := network.DialConnection(ordererNode) + if err != nil { + return nil, fmt.Errorf("failed to dial orderer: %w", err) + } + defer ordererConn.Close() // Get organization details org, err := s.orgService.GetOrganizationByMspID(ctx, s.mspID) if err != nil { - return "", fmt.Errorf("failed to get organization: %w", err) + return nil, fmt.Errorf("failed to get organization: %w", err) } // Get signing key var privateKeyPEM string 
if !org.AdminSignKeyID.Valid { - return "", fmt.Errorf("organization has no admin sign key") + return nil, fmt.Errorf("organization has no admin sign key") } adminSignKey, err := s.keyMgmtService.GetKey(ctx, int(org.AdminSignKeyID.Int64)) if err != nil { - return "", fmt.Errorf("failed to get admin sign key: %w", err) + return nil, fmt.Errorf("failed to get admin sign key: %w", err) } if adminSignKey.Certificate == nil { - return "", fmt.Errorf("admin sign key has no certificate") + return nil, fmt.Errorf("admin sign key has no certificate") } // Get private key from key management service privateKeyPEM, err = s.keyMgmtService.GetDecryptedPrivateKey(int(org.AdminSignKeyID.Int64)) if err != nil { - return "", fmt.Errorf("failed to get private key: %w", err) + return nil, fmt.Errorf("failed to get private key: %w", err) } - // Create template data - orgs := []*Org{} - var peers []*Peer - var certAuths []*CA - var ordererNodes []*Orderer - - // Add organization with user - fabricOrg := &Org{ - MSPID: org.MspID, - CertAuths: []string{}, - Peers: []string{}, - Orderers: []string{}, + cert, err := gwidentity.CertificateFromPEM([]byte(*adminSignKey.Certificate)) + if err != nil { + return nil, fmt.Errorf("failed to read certificate: %w", err) } - // Add admin user if signing certificate is available - if org.SignKeyID.Valid && org.SignCertificate != "" { - adminUser := OrgUser{ - Name: "Admin", - Cert: *adminSignKey.Certificate, - Key: privateKeyPEM, - } - fabricOrg.Users = []OrgUser{adminUser} + priv, err := gwidentity.PrivateKeyFromPEM([]byte(privateKeyPEM)) + if err != nil { + return nil, fmt.Errorf("failed to read private key: %w", err) } - orgs = append(orgs, fabricOrg) - if ordererURL != "" && ordererTLSCert != "" { - fabricOrg.Orderers = []string{"orderer0"} - // Add orderer - orderer := &Orderer{ - URL: ordererURL, - Name: "orderer0", - TLSCACert: ordererTLSCert, - } - ordererNodes = append(ordererNodes, orderer) + ordererMSP, err := 
identity.NewPrivateKeySigningIdentity(s.mspID, cert, priv) + if err != nil { + return nil, fmt.Errorf("failed to create orderer msp: %w", err) } - - // Parse template - tmpl, err := template.New("networkConfig").Funcs(template.FuncMap{ - "indent": func(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - return pad + strings.Replace(v, "\n", "\n"+pad, -1) - }, - }).Parse(tmplGoConfig) + // Parse the orderer TLS certificate + ordererTLSCertParsed, err := tls.X509KeyPair([]byte(*adminSignKey.Certificate), []byte(privateKeyPEM)) if err != nil { - return "", fmt.Errorf("failed to parse network config template: %w", err) + return nil, fmt.Errorf("failed to parse orderer TLS certificate: %w", err) } - // Execute template - var buf bytes.Buffer - err = tmpl.Execute(&buf, map[string]interface{}{ - "Peers": peers, - "Orderers": ordererNodes, - "Organizations": orgs, - "CertAuths": certAuths, - "Organization": s.mspID, - "Internal": false, - }) + ordererBlock, err := channel.GetConfigBlockFromOrderer(ctx, ordererConn, ordererMSP, channelID, ordererTLSCertParsed) if err != nil { - return "", fmt.Errorf("failed to execute network config template: %w", err) + return nil, fmt.Errorf("failed to get config block from orderer: %w", err) } - return buf.String(), nil + return ordererBlock, nil } -// GetConfigBlockWithNetworkConfig retrieves a config block using a generated network config -func (s *FabricOrg) GetConfigBlockWithNetworkConfig(ctx context.Context, channelID, ordererURL, ordererTLSCert string) (*common.Block, error) { - s.logger.Info("Fetching channel config with network config", - "mspID", s.mspID, - "channel", channelID, - "ordererUrl", ordererURL) - +// getAdminIdentity retrieves the admin identity for the organization +func (s *FabricOrg) getAdminIdentity(ctx context.Context) (identity.SigningIdentity, error) { // Get organization details org, err := s.orgService.GetOrganizationByMspID(ctx, s.mspID) if err != nil { return nil, fmt.Errorf("failed to 
get organization: %w", err) } - // Get signing key if !org.AdminSignKeyID.Valid { return nil, fmt.Errorf("organization has no signing key") } - // Generate network config - networkConfig, err := s.GenerateNetworkConfig(ctx, channelID, ordererURL, ordererTLSCert) + + // Get admin signing key + adminSignKey, err := s.keyMgmtService.GetKey(ctx, int(org.AdminSignKeyID.Int64)) if err != nil { - return nil, fmt.Errorf("failed to generate network config: %w", err) + return nil, fmt.Errorf("failed to get admin sign key: %w", err) + } + if adminSignKey.Certificate == nil { + return nil, fmt.Errorf("admin sign key has no certificate") } - // Initialize SDK with network config - configBackend := config.FromRaw([]byte(networkConfig), "yaml") - sdk, err := fabsdk.New(configBackend) + // Get private key from key management service + privateKeyPEM, err := s.keyMgmtService.GetDecryptedPrivateKey(int(org.AdminSignKeyID.Int64)) if err != nil { - return nil, fmt.Errorf("failed to create sdk: %w", err) + return nil, fmt.Errorf("failed to get private key: %w", err) } - defer sdk.Close() - // Create SDK context - sdkContext := sdk.Context( - fabsdk.WithOrg(s.mspID), - fabsdk.WithUser("Admin"), - ) + cert, err := gwidentity.CertificateFromPEM([]byte(*adminSignKey.Certificate)) + if err != nil { + return nil, fmt.Errorf("failed to read certificate: %w", err) + } - // Create resource management client - resClient, err := resmgmt.New(sdkContext) + priv, err := gwidentity.PrivateKeyFromPEM([]byte(privateKeyPEM)) if err != nil { - return nil, fmt.Errorf("failed to create resmgmt client: %w", err) + return nil, fmt.Errorf("failed to read private key: %w", err) } - // Fetch channel configuration - configBlock, err := resClient.QueryConfigBlockFromOrderer(channelID) + signingIdentity, err := identity.NewPrivateKeySigningIdentity(s.mspID, cert, priv) if err != nil { - return nil, fmt.Errorf("failed to query channel config: %w", err) + return nil, fmt.Errorf("failed to create signing identity: 
%w", err) } - return configBlock, nil + return signingIdentity, nil } -// GetGenesisBlock fetches the genesis block for a channel from the orderer -func (s *FabricOrg) GetGenesisBlock(ctx context.Context, channelID string, ordererURL string, ordererTLSCert []byte) ([]byte, error) { - s.logger.Info("Fetching genesis block with network config", - "mspID", s.mspID, - "channel", channelID, - "ordererUrl", ordererURL) - +// getOrdererMSP creates a signing identity for interacting with the orderer +func (s *FabricOrg) getOrdererMSP(ctx context.Context) (identity.SigningIdentity, error) { // Get organization details org, err := s.orgService.GetOrganizationByMspID(ctx, s.mspID) if err != nil { return nil, fmt.Errorf("failed to get organization: %w", err) } - // Get signing key if !org.AdminSignKeyID.Valid { return nil, fmt.Errorf("organization has no signing key") } - // Generate network config - networkConfig, err := s.GenerateNetworkConfig(ctx, channelID, ordererURL, string(ordererTLSCert)) + // Get admin signing key + adminSignKey, err := s.keyMgmtService.GetKey(ctx, int(org.AdminSignKeyID.Int64)) if err != nil { - return nil, fmt.Errorf("failed to generate network config: %w", err) + return nil, fmt.Errorf("failed to get admin sign key: %w", err) + } + if adminSignKey.Certificate == nil { + return nil, fmt.Errorf("admin sign key has no certificate") } - // Initialize SDK with network config - configBackend := config.FromRaw([]byte(networkConfig), "yaml") - sdk, err := fabsdk.New(configBackend) + // Get private key from key management service + privateKeyPEM, err := s.keyMgmtService.GetDecryptedPrivateKey(int(org.AdminSignKeyID.Int64)) if err != nil { - return nil, fmt.Errorf("failed to create sdk: %w", err) + return nil, fmt.Errorf("failed to get private key: %w", err) } - defer sdk.Close() - resmClient, err := resmgmt.New(sdk.Context( - fabsdk.WithOrg(s.mspID), - fabsdk.WithUser("Admin"), - )) + + cert, err := 
gwidentity.CertificateFromPEM([]byte(*adminSignKey.Certificate)) if err != nil { - return nil, fmt.Errorf("failed to create resmgmt client: %w", err) + return nil, fmt.Errorf("failed to read certificate: %w", err) } - genesisBlock, err := resmClient.GenesisBlock(channelID) + + priv, err := gwidentity.PrivateKeyFromPEM([]byte(privateKeyPEM)) if err != nil { - return nil, fmt.Errorf("failed to query genesis block: %w", err) + return nil, fmt.Errorf("failed to read private key: %w", err) } - genesisBlockBytes, err := proto.Marshal(genesisBlock) + + ordererMSP, err := identity.NewPrivateKeySigningIdentity(s.mspID, cert, priv) if err != nil { - return nil, fmt.Errorf("failed to marshal genesis block: %w", err) + return nil, fmt.Errorf("failed to create orderer msp: %w", err) } - return genesisBlockBytes, nil + + return ordererMSP, nil } -// createSigningIdentity creates a signing identity from the organization's admin credentials -func (s *FabricOrg) createSigningIdentity(sdk *fabsdk.FabricSDK, privateKeyPEM string, certPEM string) (msp.SigningIdentity, error) { - sdkConfig, err := sdk.Config() +// getOrdererConnection establishes a gRPC connection to the orderer +func (s *FabricOrg) getOrdererConnection(ctx context.Context, ordererURL string, ordererTLSCert string) (*grpc.ClientConn, error) { + + // Create orderer connection + ordererConn, err := network.DialConnection(network.Node{ + Addr: strings.TrimPrefix(ordererURL, "grpcs://"), + TLSCACertByte: []byte(ordererTLSCert), + }) if err != nil { - return nil, fmt.Errorf("failed to get SDK config: %w", err) + return nil, fmt.Errorf("failed to create orderer connection: %w", err) } - cryptoConfig := cryptosuite.ConfigFromBackend(sdkConfig) - cryptoSuite, err := sw.GetSuiteByConfig(cryptoConfig) + return ordererConn, nil +} + +// getOrdererTLSKeyPair creates a TLS key pair for secure communication with the orderer +func (s *FabricOrg) getOrdererTLSKeyPair(ctx context.Context, ordererTLSCert string) (tls.Certificate, error) 
{ + // Get organization details + org, err := s.orgService.GetOrganizationByMspID(ctx, s.mspID) if err != nil { - return nil, fmt.Errorf("failed to get crypto suite: %w", err) + return tls.Certificate{}, fmt.Errorf("failed to get organization: %w", err) } - userStore := mspimpl.NewMemoryUserStore() - endpointConfig, err := fab.ConfigFromBackend(sdkConfig) + if !org.AdminSignKeyID.Valid { + return tls.Certificate{}, fmt.Errorf("organization has no admin sign key") + } + + // Get private key from key management service + privateKeyPEM, err := s.keyMgmtService.GetDecryptedPrivateKey(int(org.AdminSignKeyID.Int64)) if err != nil { - return nil, fmt.Errorf("failed to get endpoint config: %w", err) + return tls.Certificate{}, fmt.Errorf("failed to get private key: %w", err) } - identityManager, err := mspimpl.NewIdentityManager(s.mspID, userStore, cryptoSuite, endpointConfig) + // Parse the orderer TLS certificate + ordererTLSCertParsed, err := tls.X509KeyPair([]byte(ordererTLSCert), []byte(privateKeyPEM)) if err != nil { - return nil, fmt.Errorf("failed to create identity manager: %w", err) + return tls.Certificate{}, fmt.Errorf("failed to parse orderer TLS certificate: %w", err) } - return identityManager.CreateSigningIdentity( - msp.WithPrivateKey([]byte(privateKeyPEM)), - msp.WithCert([]byte(certPEM)), - ) + return ordererTLSCertParsed, nil +} + +// GetGenesisBlock fetches the genesis block for a channel from the orderer +func (s *FabricOrg) GetGenesisBlock(ctx context.Context, channelID string, ordererURL string, ordererTLSCert []byte) ([]byte, error) { + s.logger.Info("Fetching genesis block with network config", + "mspID", s.mspID, + "channel", channelID, + "ordererUrl", ordererURL) + + ordererConn, err := s.getOrdererConnection(ctx, ordererURL, string(ordererTLSCert)) + if err != nil { + return nil, fmt.Errorf("failed to get orderer connection: %w", err) + } + defer ordererConn.Close() + + ordererMSP, err := s.getOrdererMSP(ctx) + if err != nil { + return nil, 
fmt.Errorf("failed to get orderer msp: %w", err) + } + + // Create TLS certificate from orderer TLS cert + ordererTLSKeyPair := tls.Certificate{ + Certificate: [][]byte{ordererTLSCert}, + } + genesisBlock, err := channel.GetGenesisBlock(ctx, ordererConn, ordererMSP, channelID, ordererTLSKeyPair) + if err != nil { + return nil, fmt.Errorf("failed to get genesis block: %w", err) + } + genesisBlockBytes, err := proto.Marshal(genesisBlock) + if err != nil { + return nil, fmt.Errorf("failed to marshal genesis block: %w", err) + } + + return genesisBlockBytes, nil } // CreateConfigSignature creates a signature for a config update using the organization's admin credentials -func (s *FabricOrg) CreateConfigSignature(ctx context.Context, channelID string, configUpdateBytes []byte) (*common.ConfigSignature, error) { +func (s *FabricOrg) CreateConfigSignature(ctx context.Context, channelID string, configUpdateBytes *cb.Envelope) (*cb.Envelope, error) { s.logger.Info("Creating config signature", "mspID", s.mspID, "channel", channelID) @@ -454,49 +318,251 @@ func (s *FabricOrg) CreateConfigSignature(ctx context.Context, channelID string, return nil, fmt.Errorf("admin sign key has no certificate") } - // Get private key - privateKeyPEM, err := s.keyMgmtService.GetDecryptedPrivateKey(int(org.AdminSignKeyID.Int64)) + // Create signing identity + signingIdentity, err := s.getAdminIdentity(ctx) if err != nil { - return nil, fmt.Errorf("failed to get private key: %w", err) + return nil, fmt.Errorf("failed to create signing identity: %w", err) } - // Generate network config for SDK initialization - networkConfig, err := s.GenerateNetworkConfig(ctx, channelID, "", "") // Empty orderer details as they're not needed for signing + // Create config signature from the config update bytes + signature, err := SignConfigTx(channelID, configUpdateBytes, signingIdentity) if err != nil { - return
nil, fmt.Errorf("failed to generate network config: %w", err) + return nil, fmt.Errorf("failed to create config signature: %w", err) } + return signature, nil +} + +const ( + msgVersion = int32(0) + epoch = 0 +) - // Initialize SDK - configBackend := config.FromRaw([]byte(networkConfig), "yaml") - sdk, err := fabsdk.New(configBackend) +func SignConfigTx(channelID string, envConfigUpdate *cb.Envelope, signer identity.SigningIdentity) (*cb.Envelope, error) { + payload, err := protoutil.UnmarshalPayload(envConfigUpdate.Payload) if err != nil { - return nil, fmt.Errorf("failed to create sdk: %w", err) + return nil, errors.New("bad payload") } - defer sdk.Close() - // Create signing identity - signingIdentity, err := s.createSigningIdentity(sdk, privateKeyPEM, *adminSignKey.Certificate) + if payload.Header == nil || payload.Header.ChannelHeader == nil { + return nil, errors.New("bad header") + } + + ch, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader) if err != nil { - return nil, fmt.Errorf("failed to create signing identity: %w", err) + return nil, errors.New("could not unmarshall channel header") } - // Create SDK context with signing identity - sdkContext := sdk.Context( - fabsdk.WithIdentity(signingIdentity), - fabsdk.WithOrg(s.mspID), - ) + if ch.Type != int32(cb.HeaderType_CONFIG_UPDATE) { + return nil, errors.New("bad type") + } + + if ch.ChannelId == "" { + return nil, errors.New("empty channel id") + } - // Create resource management client - resClient, err := resmgmt.New(sdkContext) + configUpdateEnv, err := protoutil.UnmarshalConfigUpdateEnvelope(payload.Data) if err != nil { - return nil, fmt.Errorf("failed to create resmgmt client: %w", err) + return nil, errors.New("bad config update env") } - // Create config signature from the config update bytes - signature, err := resClient.CreateConfigSignatureFromReader(signingIdentity, bytes.NewReader(configUpdateBytes)) + sigHeader, err := protoutil.NewSignatureHeader(signer) if err != nil { - 
return nil, fmt.Errorf("failed to create config signature: %w", err) + return nil, err } - return signature, nil + configSig := &cb.ConfigSignature{ + SignatureHeader: protoutil.MarshalOrPanic(sigHeader), + } + + configSig.Signature, err = signer.Sign(Concatenate(configSig.SignatureHeader, configUpdateEnv.ConfigUpdate)) + if err != nil { + return nil, err + } + + configUpdateEnv.Signatures = append(configUpdateEnv.Signatures, configSig) + + return protoutil.CreateSignedEnvelope(cb.HeaderType_CONFIG_UPDATE, channelID, signer, configUpdateEnv, msgVersion, epoch) +} + +func Concatenate[T any](slices ...[]T) []T { + size := 0 + for _, slice := range slices { + size += len(slice) + } + + result := make([]T, size) + i := 0 + for _, slice := range slices { + copy(result[i:], slice) + i += len(slice) + } + + return result +} + +// RevokeCertificate adds a certificate to the CRL +func (s *FabricOrg) RevokeCertificate(ctx context.Context, serialNumber *big.Int, revocationReason int) error { + s.logger.Info("Revoking certificate", + "mspID", s.mspID, + "serialNumber", serialNumber.String(), + "reason", revocationReason) + + // Get organization details + org, err := s.orgService.GetOrganizationByMspID(ctx, s.mspID) + if err != nil { + return fmt.Errorf("failed to get organization: %w", err) + } + + if !org.SignKeyID.Valid { + return fmt.Errorf("organization has no admin sign key") + } + + // Add the certificate to the database + err = s.queries.AddRevokedCertificate(ctx, &db.AddRevokedCertificateParams{ + FabricOrganizationID: org.ID, + SerialNumber: serialNumber.Text(16), // Store as hex string + RevocationTime: time.Now(), + Reason: int64(revocationReason), + IssuerCertificateID: sql.NullInt64{ + Int64: org.SignKeyID.Int64, + Valid: true, + }, + }) + if err != nil { + return fmt.Errorf("failed to add revoked certificate to database: %w", err) + } + + // Update the CRL timestamps in the organization + now := time.Now() + + err = s.queries.UpdateOrganizationCRL(ctx, 
&db.UpdateOrganizationCRLParams{ + ID: org.ID, + CrlLastUpdate: sql.NullTime{Time: now, Valid: true}, + CrlKeyID: org.SignKeyID, + }) + if err != nil { + return fmt.Errorf("failed to update organization CRL info: %w", err) + } + + s.logger.Info("Successfully revoked certificate", + "mspID", s.mspID, + "serialNumber", serialNumber.String()) + + return nil +} + +// GetCRL returns the current CRL as PEM encoded bytes +func (s *FabricOrg) GetCRL(ctx context.Context) ([]byte, error) { + // Get organization details + org, err := s.orgService.GetOrganizationByMspID(ctx, s.mspID) + if err != nil { + return nil, fmt.Errorf("failed to get organization: %w", err) + } + + // Get all revoked certificates for this organization + revokedCerts, err := s.queries.GetRevokedCertificates(ctx, org.ID) + if err != nil { + return nil, fmt.Errorf("failed to get revoked certificates: %w", err) + } + + // Get the admin signing key for signing the CRL + adminSignKey, err := s.keyMgmtService.GetKey(ctx, int(org.SignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to get admin sign key: %w", err) + } + + // Parse the certificate + cert, err := gwidentity.CertificateFromPEM([]byte(*adminSignKey.Certificate)) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %w", err) + } + + // Get private key from key management service + privateKeyPEM, err := s.keyMgmtService.GetDecryptedPrivateKey(int(org.SignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to get private key: %w", err) + } + + // Parse the private key + priv, err := gwidentity.PrivateKeyFromPEM([]byte(privateKeyPEM)) + if err != nil { + return nil, fmt.Errorf("failed to parse private key: %w", err) + } + + // Cast private key to crypto.Signer + signer, ok := priv.(crypto.Signer) + if !ok { + return nil, fmt.Errorf("private key does not implement crypto.Signer") + } + + // Create CRL + now := time.Now() + crl := &x509.RevocationList{ + Number: big.NewInt(1), + ThisUpdate: now, + 
NextUpdate: now.AddDate(0, 0, 7), // Valid for 7 days + } + + // Add all revoked certificates + for _, rc := range revokedCerts { + serialNumber, ok := new(big.Int).SetString(rc.SerialNumber, 16) + if !ok { + return nil, fmt.Errorf("invalid serial number format: %s", rc.SerialNumber) + } + + revokedCert := pkix.RevokedCertificate{ + SerialNumber: serialNumber, + RevocationTime: rc.RevocationTime, + Extensions: []pkix.Extension{ + { + Id: asn1.ObjectIdentifier{2, 5, 29, 21}, // CRLReason OID + Value: []byte{byte(rc.Reason)}, + }, + }, + } + crl.RevokedCertificates = append(crl.RevokedCertificates, revokedCert) + } + + // Create the CRL + crlBytes, err := x509.CreateRevocationList(rand.Reader, crl, cert, signer) + if err != nil { + return nil, fmt.Errorf("failed to create CRL: %w", err) + } + + // Encode the CRL in PEM format + pemBlock := &pem.Block{ + Type: "X509 CRL", + Bytes: crlBytes, + } + + return pem.EncodeToMemory(pemBlock), nil +} + +// InitializeCRL creates a new CRL if one doesn't exist +func (s *FabricOrg) InitializeCRL(ctx context.Context) error { + // Get organization details + org, err := s.orgService.GetOrganizationByMspID(ctx, s.mspID) + if err != nil { + return fmt.Errorf("failed to get organization: %w", err) + } + + if !org.SignKeyID.Valid { + return fmt.Errorf("organization has no admin sign key") + } + + // Update the CRL timestamps in the organization + now := time.Now() + err = s.queries.UpdateOrganizationCRL(ctx, &db.UpdateOrganizationCRLParams{ + ID: org.ID, + CrlLastUpdate: sql.NullTime{Time: now, Valid: true}, + CrlKeyID: org.SignKeyID, + }) + if err != nil { + return fmt.Errorf("failed to initialize organization CRL info: %w", err) + } + + s.logger.Info("Successfully initialized CRL", + "mspID", s.mspID) + + return nil } diff --git a/pkg/networks/service/fabric/types.go b/pkg/networks/service/fabric/types.go index 0fbbfe0..a9150f4 100644 --- a/pkg/networks/service/fabric/types.go +++ b/pkg/networks/service/fabric/types.go @@ -1,5 +1,7 @@ 
package fabric +import "time" + // NodeType represents the type of a Fabric node type NodeType string @@ -62,3 +64,66 @@ type NetworkConfig struct { Capabilities map[string][]string `json:"capabilities"` Policies map[string]interface{} `json:"policies"` } + +// Block represents a block in the Fabric blockchain +type Block struct { + Number uint64 `json:"number"` + Hash string `json:"hash"` + PreviousHash string `json:"previousHash"` + DataHash string `json:"dataHash"` + Timestamp time.Time `json:"timestamp"` + TxCount int `json:"txCount"` + Data []byte `json:"data"` +} + +// Transaction represents a transaction in a Fabric block +type Transaction struct { + ID string `json:"id"` + Type string `json:"type"` + Timestamp time.Time `json:"timestamp"` + Creator string `json:"creator"` + Status string `json:"status"` + BlockNum uint64 `json:"blockNum"` +} + +// BlockInfo represents information about the blockchain +type BlockInfo struct { + Height uint64 `json:"height"` + CurrentBlockHash string `json:"currentBlockHash"` + PreviousBlockHash string `json:"previousBlockHash"` +} + +// DeployerOptions contains options for the Fabric deployer +type DeployerOptions struct { + NetworkID int64 `json:"networkId"` + ChannelID string `json:"channelId"` + ConsortiumName string `json:"consortiumName"` + OrdererEndpoint string `json:"ordererEndpoint"` + PeerEndpoint string `json:"peerEndpoint"` +} + +// NetworkQueryOptions contains options for querying network data +type NetworkQueryOptions struct { + Limit int32 `json:"limit"` + Offset int32 `json:"offset"` +} + +// BlockQueryOptions contains options for querying blocks +type BlockQueryOptions struct { + StartBlock uint64 `json:"startBlock"` + EndBlock uint64 `json:"endBlock"` + Limit int32 `json:"limit"` + Offset int32 `json:"offset"` +} + +// PaginatedBlocks represents a paginated list of blocks +type PaginatedBlocks struct { + Items []Block `json:"items"` + TotalCount int64 `json:"totalCount"` +} + +// PaginatedTransactions 
represents a paginated list of transactions +type PaginatedTransactions struct { + Items []Transaction `json:"items"` + TotalCount int64 `json:"totalCount"` +} diff --git a/pkg/networks/service/service.go b/pkg/networks/service/service.go index 76c0ea6..4a177b3 100644 --- a/pkg/networks/service/service.go +++ b/pkg/networks/service/service.go @@ -177,7 +177,7 @@ func (s *NetworkService) CreateNetwork(ctx context.Context, name, description st // Generate a random network ID networkID := fmt.Sprintf("net_%s_%s", name, uuid.New().String()) // Create network in database - network, err := s.db.CreateNetwork(ctx, db.CreateNetworkParams{ + network, err := s.db.CreateNetwork(ctx, &db.CreateNetworkParams{ Name: name, Platform: string(baseConfig.Type), Description: sql.NullString{String: description, Valid: description != ""}, @@ -412,7 +412,7 @@ func (s *NetworkService) DeleteNetwork(ctx context.Context, networkID int64) err } // Helper function to map db.Network to service.Network -func (s *NetworkService) mapDBNetworkToServiceNetwork(n db.Network) *Network { +func (s *NetworkService) mapDBNetworkToServiceNetwork(n *db.Network) *Network { var config, deploymentConfig, exposedPorts json.RawMessage if n.Config.Valid { config = json.RawMessage(n.Config.String) @@ -461,7 +461,7 @@ func (s *NetworkService) mapDBNetworkToServiceNetwork(n db.Network) *Network { // UpdateNetworkStatus updates the status of a network func (s *NetworkService) UpdateNetworkStatus(ctx context.Context, networkID int64, status NetworkStatus) error { - err := s.db.UpdateNetworkStatus(ctx, db.UpdateNetworkStatusParams{ + err := s.db.UpdateNetworkStatus(ctx, &db.UpdateNetworkStatusParams{ ID: networkID, Status: string(status), }) @@ -567,7 +567,7 @@ func (s *NetworkService) AddNodeToNetwork(ctx context.Context, networkID, nodeID } // Check if node is already in network - exists, err := s.db.CheckNetworkNodeExists(ctx, db.CheckNetworkNodeExistsParams{ + exists, err := s.db.CheckNetworkNodeExists(ctx, 
&db.CheckNetworkNodeExistsParams{ NetworkID: networkID, NodeID: nodeID, }) @@ -579,7 +579,7 @@ func (s *NetworkService) AddNodeToNetwork(ctx context.Context, networkID, nodeID } // Create network node entry - _, err = s.db.CreateNetworkNode(ctx, db.CreateNetworkNodeParams{ + _, err = s.db.CreateNetworkNode(ctx, &db.CreateNetworkNodeParams{ NetworkID: networkID, NodeID: nodeID, Status: "pending", @@ -684,7 +684,7 @@ func (s *NetworkService) SetAnchorPeers(ctx context.Context, networkID, organiza return "", fmt.Errorf("failed to get network nodes: %w", err) } - var ordererURL, ordererTLSCert string + var ordererAddress, ordererTLSCert string // Look for orderer in our registry for _, node := range networkNodes { @@ -693,14 +693,14 @@ func (s *NetworkService) SetAnchorPeers(ctx context.Context, networkID, organiza if !ok { continue } - ordererURL = fmt.Sprintf("grpcs://%s", ordererConfig.ExternalEndpoint) + ordererAddress = ordererConfig.ExternalEndpoint ordererTLSCert = ordererConfig.TLSCACert break } } // If no orderer found in registry, try to get from current config block - if ordererURL == "" { + if ordererAddress == "" { // Get current config block configBlock, err := fabricDeployer.GetCurrentChannelConfig(networkID) if err != nil { @@ -715,16 +715,16 @@ func (s *NetworkService) SetAnchorPeers(ctx context.Context, networkID, organiza if len(ordererInfo) == 0 { return "", fmt.Errorf("no orderer found in config block") } - ordererURL = ordererInfo[0].URL + ordererAddress = ordererInfo[0].URL ordererTLSCert = ordererInfo[0].TLSCert } - if ordererURL == "" { + if ordererAddress == "" { return "", fmt.Errorf("no orderer found in network or config block") } // Set anchor peers using deployer with the found orderer info - txID, err := fabricDeployer.SetAnchorPeersWithOrderer(ctx, networkID, organizationID, deployerAnchorPeers, ordererURL, ordererTLSCert) + txID, err := fabricDeployer.SetAnchorPeersWithOrderer(ctx, networkID, organizationID, deployerAnchorPeers, 
ordererAddress, ordererTLSCert) if err != nil { return "", err } @@ -768,7 +768,7 @@ func (s *NetworkService) ReloadNetworkBlock(ctx context.Context, networkID int64 } configBlockB64 := base64.StdEncoding.EncodeToString(configBlock) - err = s.db.UpdateNetworkCurrentConfigBlock(ctx, db.UpdateNetworkCurrentConfigBlockParams{ + err = s.db.UpdateNetworkCurrentConfigBlock(ctx, &db.UpdateNetworkCurrentConfigBlockParams{ ID: networkID, CurrentConfigBlockB64: sql.NullString{String: configBlockB64, Valid: true}, }) @@ -916,3 +916,274 @@ func (s *NetworkService) importBesuNetwork(ctx context.Context, params ImportNet Message: "Besu network imported successfully", }, nil } + +// UpdateFabricNetwork prepares a config update proposal for a Fabric network +func (s *NetworkService) UpdateFabricNetwork(ctx context.Context, networkID int64, operations []fabric.ConfigUpdateOperation) (*fabric.ConfigUpdateProposal, error) { + // Get deployer for the network + deployer, err := s.deployerFactory.GetDeployer(string(BlockchainTypeFabric)) + if err != nil { + return nil, fmt.Errorf("failed to get deployer: %w", err) + } + + // Assert that it's a Fabric deployer + fabricDeployer, ok := deployer.(*fabric.FabricDeployer) + if !ok { + return nil, fmt.Errorf("network %d is not a Fabric network", networkID) + } + + // Prepare the config update + proposal, err := fabricDeployer.PrepareConfigUpdate(ctx, networkID, operations) + if err != nil { + return nil, fmt.Errorf("failed to prepare config update: %w", err) + } + + // Get organizations managed by us that can sign the config update + orgs, err := s.db.ListFabricOrganizations(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get network organizations: %w", err) + } + var signingOrgIDs []string + for _, org := range orgs { + signingOrgIDs = append(signingOrgIDs, org.MspID) + } + + ordererAddress, ordererTLSCert, err := s.getOrdererAddressAndCertForNetwork(ctx, networkID, fabricDeployer) + if err != nil { + return nil, fmt.Errorf("failed 
to get orderer address and TLS certificate: %w", err) + } + + res, err := fabricDeployer.UpdateChannelConfig(ctx, networkID, proposal.ConfigUpdateEnvelope, signingOrgIDs, ordererAddress, ordererTLSCert) + if err != nil { + return nil, fmt.Errorf("failed to update channel config: %w", err) + } + s.logger.Info("Channel config updated", "txID", res) + return proposal, nil +} + +func (s *NetworkService) getOrdererAddressAndCertForNetwork(ctx context.Context, networkID int64, fabricDeployer *fabric.FabricDeployer) (string, string, error) { + + // Try to get orderer info from network nodes first + networkNodes, err := s.GetNetworkNodes(ctx, networkID) + if err != nil { + return "", "", fmt.Errorf("failed to get network nodes: %w", err) + } + + var ordererAddress, ordererTLSCert string + + // Look for orderer in our registry + for _, node := range networkNodes { + if node.Node.NodeType == nodetypes.NodeTypeFabricOrderer { + ordererConfig, ok := node.Node.DeploymentConfig.(*nodetypes.FabricOrdererDeploymentConfig) + if !ok { + continue + } + ordererAddress = ordererConfig.ExternalEndpoint + ordererTLSCert = ordererConfig.TLSCACert + break + } + } + + // If no orderer found in registry, try to get from current config block + if ordererAddress == "" { + // Get current config block + configBlock, err := fabricDeployer.GetCurrentChannelConfig(networkID) + if err != nil { + return "", "", fmt.Errorf("failed to get current config block: %w", err) + } + + // Extract orderer info from config block + ordererInfo, err := fabricDeployer.GetOrderersFromConfigBlock(ctx, configBlock) + if err != nil { + return "", "", fmt.Errorf("failed to get orderer info from config: %w", err) + } + if len(ordererInfo) == 0 { + return "", "", fmt.Errorf("no orderer found in config block") + } + ordererAddress = ordererInfo[0].URL + ordererTLSCert = ordererInfo[0].TLSCert + } + + if ordererAddress == "" { + return "", "", fmt.Errorf("no orderer found in network or config block") + } + + return 
ordererAddress, ordererTLSCert, nil +} + +func (s *NetworkService) GetChainInfo(ctx context.Context, networkID int64) (*ChainInfo, error) { + fabricDeployer, err := s.getFabricDeployerForNetwork(ctx, networkID) + if err != nil { + return nil, fmt.Errorf("failed to get fabric deployer: %w", err) + } + chainInfo, err := fabricDeployer.GetChainInfo(ctx, networkID) + if err != nil { + return nil, fmt.Errorf("failed to get chain info: %w", err) + } + return &ChainInfo{ + Height: chainInfo.Height, + CurrentBlockHash: chainInfo.CurrentBlockHash, + PreviousBlockHash: chainInfo.PreviousBlockHash, + }, nil +} + +// GetBlocks retrieves a paginated list of blocks from the network +func (s *NetworkService) GetBlocks(ctx context.Context, networkID int64, limit, offset int32, reverse bool) ([]Block, int64, error) { + // Get the fabric deployer for this network + fabricDeployer, err := s.getFabricDeployerForNetwork(ctx, networkID) + if err != nil { + return nil, 0, fmt.Errorf("failed to get fabric deployer: %w", err) + } + + // Use the fabric deployer to get blocks + fabricBlocks, total, err := fabricDeployer.GetBlocks(ctx, networkID, limit, offset, reverse) + if err != nil { + return nil, 0, fmt.Errorf("failed to get blocks: %w", err) + } + + // Map fabric.Block to service.Block + blocks := make([]Block, len(fabricBlocks)) + for i, fb := range fabricBlocks { + blocks[i], err = s.mapBlockToServiceBlock(fb) + if err != nil { + return nil, 0, fmt.Errorf("failed to map block to service block: %w", err) + } + } + + return blocks, total, nil +} + +func (s *NetworkService) mapBlockToServiceBlock(fb fabric.Block) (Block, error) { + return Block{ + Number: fb.Number, + Hash: fb.Hash, + PreviousHash: fb.PreviousHash, + Timestamp: fb.Timestamp, + TxCount: fb.TxCount, + Data: fb.Data, + }, nil +} + +type BlockWithTransactions struct { + Block Block + Transactions []Transaction +} + +// GetBlockTransactions retrieves all transactions from a specific block +func (s *NetworkService) 
GetBlockTransactions(ctx context.Context, networkID int64, blockNum uint64) (*BlockWithTransactions, error) { + // Get the fabric deployer for this network + fabricDeployer, err := s.getFabricDeployerForNetwork(ctx, networkID) + if err != nil { + return nil, fmt.Errorf("failed to get fabric deployer: %w", err) + } + + // Use the fabric deployer to get block transactions + fabricTransactions, err := fabricDeployer.GetBlockTransactions(ctx, networkID, blockNum) + if err != nil { + return nil, fmt.Errorf("failed to get block transactions: %w", err) + } + + // Map fabric.Transaction to service.Transaction + transactions := make([]Transaction, len(fabricTransactions.Transactions)) + for i, ft := range fabricTransactions.Transactions { + transactions[i] = Transaction{ + TxID: ft.ID, + BlockNumber: ft.BlockNum, + Timestamp: ft.Timestamp, + Type: ft.Type, + Creator: ft.Creator, + } + } + block, err := s.mapBlockToServiceBlock(fabricTransactions.Block) + if err != nil { + return nil, fmt.Errorf("failed to map block to service block: %w", err) + } + blockWithTransactions := &BlockWithTransactions{ + Block: block, + Transactions: transactions, + } + return blockWithTransactions, nil +} + +// GetTransaction retrieves a specific transaction by its ID +func (s *NetworkService) GetTransaction(ctx context.Context, networkID int64, txID string) (Transaction, error) { + // Get the fabric deployer for this network + fabricDeployer, err := s.getFabricDeployerForNetwork(ctx, networkID) + if err != nil { + return Transaction{}, fmt.Errorf("failed to get fabric deployer: %w", err) + } + + // Use the fabric deployer to get transaction + ft, err := fabricDeployer.GetTransaction(ctx, networkID, txID) + if err != nil { + return Transaction{}, fmt.Errorf("failed to get transaction: %w", err) + } + + // Map fabric.Transaction to service.Transaction + transaction := Transaction{ + TxID: ft.ID, + BlockNumber: ft.BlockNum, + Timestamp: ft.Timestamp, + Type: ft.Type, + Creator: ft.Creator, + } + + 
return transaction, nil +} + +// getFabricDeployerForNetwork creates and returns a fabric deployer for the specified network +func (s *NetworkService) getFabricDeployerForNetwork(ctx context.Context, networkID int64) (*fabric.FabricDeployer, error) { + // Get network details to verify it exists and is a Fabric network + network, err := s.db.GetNetwork(ctx, networkID) + if err != nil { + return nil, fmt.Errorf("failed to get network: %w", err) + } + deployer, err := s.deployerFactory.GetDeployer(network.Platform) + if err != nil { + return nil, fmt.Errorf("failed to get deployer: %w", err) + } + + fabricDeployer, ok := deployer.(*fabric.FabricDeployer) + if !ok { + return nil, fmt.Errorf("network %d is not a Fabric network", networkID) + } + + return fabricDeployer, nil +} + +// UpdateOrganizationCRL updates the CRL for an organization in the network +func (s *NetworkService) UpdateOrganizationCRL(ctx context.Context, networkID, organizationID int64) (string, error) { + // Get network details + network, err := s.db.GetNetwork(ctx, networkID) + if err != nil { + return "", fmt.Errorf("failed to get network: %w", err) + } + + // Get deployer + deployer, err := s.deployerFactory.GetDeployer(network.Platform) + if err != nil { + return "", fmt.Errorf("failed to get deployer: %w", err) + } + + fabricDeployer, ok := deployer.(*fabric.FabricDeployer) + if !ok { + return "", fmt.Errorf("network %d is not a Fabric network", networkID) + } + + // Update the CRL in the network + txID, err := fabricDeployer.UpdateOrganizationCRL(ctx, networkID, fabric.UpdateOrganizationCRLInput{ + OrganizationID: organizationID, + }) + if err != nil { + return "", fmt.Errorf("failed to update CRL: %w", err) + } + + logrus.Info("Reloading network block after updating CRL, waiting 3 seconds") + time.Sleep(3 * time.Second) + + // Reload network block + if err := s.ReloadNetworkBlock(ctx, networkID); err != nil { + logrus.Errorf("Failed to reload network block after updating CRL: %v", err) + } + + 
return txID, nil +} diff --git a/pkg/networks/service/types.go b/pkg/networks/service/types.go index 94c6fe9..ac3c48f 100644 --- a/pkg/networks/service/types.go +++ b/pkg/networks/service/types.go @@ -1,5 +1,9 @@ package service +import ( + "time" +) + type ImportNetworkParams struct { NetworkType string GenesisFile []byte @@ -66,3 +70,29 @@ type RemoveOrdererResponse struct { NetworkID int64 `json:"network_id"` OrdererID int64 `json:"orderer_id"` } + +// Block represents a block in the blockchain +type Block struct { + Number uint64 `json:"number"` + Hash string `json:"hash"` + PreviousHash string `json:"previous_hash"` + Timestamp time.Time `json:"timestamp"` + TxCount int `json:"tx_count"` + Data []byte `json:"data"` +} + +// Transaction represents a transaction in a block +type Transaction struct { + TxID string `json:"tx_id"` + BlockNumber uint64 `json:"block_number"` + Timestamp time.Time `json:"timestamp"` + Type string `json:"type"` + Creator string `json:"creator"` + Payload []byte `json:"payload"` +} + +type ChainInfo struct { + Height uint64 + CurrentBlockHash string + PreviousBlockHash string +} diff --git a/pkg/nodes/besu/besu.go b/pkg/nodes/besu/besu.go index 2acde7a..611500c 100644 --- a/pkg/nodes/besu/besu.go +++ b/pkg/nodes/besu/besu.go @@ -10,17 +10,23 @@ import ( "runtime" "strings" + "github.com/chainlaunch/chainlaunch/pkg/config" "github.com/chainlaunch/chainlaunch/pkg/logger" + "github.com/chainlaunch/chainlaunch/pkg/networks/service/types" + settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service" "golang.org/x/text/encoding/unicode" "golang.org/x/text/transform" ) // LocalBesu represents a local Besu node type LocalBesu struct { - opts StartBesuOpts - mode string - nodeID int64 - logger *logger.Logger + opts StartBesuOpts + mode string + nodeID int64 + NetworkConfig types.BesuNetworkConfig + logger *logger.Logger + configService *config.ConfigService + settingsService *settingsservice.SettingsService } // NewLocalBesu 
creates a new LocalBesu instance @@ -29,12 +35,18 @@ func NewLocalBesu( mode string, nodeID int64, logger *logger.Logger, + configService *config.ConfigService, + settingsService *settingsservice.SettingsService, + networkConfig types.BesuNetworkConfig, ) *LocalBesu { return &LocalBesu{ - opts: opts, - mode: mode, - nodeID: nodeID, - logger: logger, + opts: opts, + mode: mode, + nodeID: nodeID, + logger: logger, + configService: configService, + settingsService: settingsService, + NetworkConfig: networkConfig, } } @@ -43,16 +55,13 @@ func (b *LocalBesu) Start() (interface{}, error) { b.logger.Info("Starting Besu node", "opts", b.opts) // Create necessary directories - homeDir, err := os.UserHomeDir() - if err != nil { - return nil, fmt.Errorf("failed to get home directory: %w", err) - } + chainlaunchDir := b.configService.GetDataPath() slugifiedID := strings.ReplaceAll(strings.ToLower(b.opts.ID), " ", "-") - dirPath := filepath.Join(homeDir, ".chainlaunch/besu", slugifiedID) + dirPath := filepath.Join(chainlaunchDir, "besu", slugifiedID) dataDir := filepath.Join(dirPath, "data") configDir := filepath.Join(dirPath, "config") - binDir := filepath.Join(homeDir, ".chainlaunch/bin/besu", b.opts.Version) + binDir := filepath.Join(chainlaunchDir, "bin/besu", b.opts.Version) // Create directories for _, dir := range []string{dataDir, configDir, binDir} { @@ -78,12 +87,12 @@ func (b *LocalBesu) Start() (interface{}, error) { } // Build command and environment - cmd := b.buildCommand(dataDir, genesisPath) + cmd := b.buildCommand(dataDir, genesisPath, configDir) env := b.buildEnvironment() switch b.mode { case "service": - return b.startService(cmd, env, dirPath) + return b.startService(cmd, env, dirPath, configDir) case "docker": return b.startDocker(env, dataDir, configDir) default: @@ -135,7 +144,7 @@ func (b *LocalBesu) checkPrerequisites() error { } // buildCommand builds the command to start Besu -func (b *LocalBesu) buildCommand(dataDir string, genesisPath string) 
string { +func (b *LocalBesu) buildCommand(dataDir string, genesisPath string, configDir string) string { var besuBinary string if runtime.GOOS == "darwin" { if runtime.GOARCH == "arm64" { @@ -144,30 +153,31 @@ func (b *LocalBesu) buildCommand(dataDir string, genesisPath string) string { besuBinary = "/usr/local/opt/besu/bin/besu" } } else { - homeDir, _ := os.UserHomeDir() - besuBinary = filepath.Join(homeDir, ".chainlaunch/bin/besu", b.opts.Version, "besu") + besuBinary = filepath.Join(b.configService.GetDataPath(), "bin/besu", b.opts.Version, "besu") } + keyPath := filepath.Join(configDir, "key") + cmd := []string{ besuBinary, - "--data-path=" + dataDir, - "--genesis-file=" + genesisPath, - fmt.Sprintf("--network-id=%d", b.opts.NetworkID), + fmt.Sprintf("--data-path=%s", dataDir), + fmt.Sprintf("--genesis-file=%s", genesisPath), "--rpc-http-enabled", - fmt.Sprintf("--rpc-http-port=%s", b.opts.RPCPort), - fmt.Sprintf("--p2p-port=%s", b.opts.P2PPort), - "--rpc-http-api=ADMIN,ETH,NET,PERM,QBFT,WEB3", - "--host-allowlist=*", - "--miner-enabled", - fmt.Sprintf("--miner-coinbase=%s", b.opts.MinerAddress), - "--min-gas-price=1000000000", - "--sync-mode=FULL", + "--rpc-http-api=ETH,NET,QBFT", "--rpc-http-cors-origins=all", - fmt.Sprintf("--node-private-key-file=%s", b.opts.NodePrivateKey), - fmt.Sprintf("--p2p-host=%s", b.opts.ListenAddress), "--rpc-http-host=0.0.0.0", - "--discovery-enabled=true", + fmt.Sprintf("--rpc-http-port=%s", b.opts.RPCPort), + "--min-gas-price=1000000000", + fmt.Sprintf("--network-id=%d", b.opts.ChainID), + "--host-allowlist=*", + fmt.Sprintf("--node-private-key-file=%s", keyPath), + "--p2p-enabled=true", + fmt.Sprintf("--p2p-host=%s", b.opts.P2PHost), + fmt.Sprintf("--p2p-port=%s", b.opts.P2PPort), + "--nat-method=NONE", + "--discovery-enabled=true", + "--profile=ENTERPRISE", } // Add bootnodes if specified @@ -371,8 +381,7 @@ func (b *LocalBesu) installBesuMacOS() error { } // Create symlink to our bin directory - homeDir, _ := 
os.UserHomeDir() - binDir := filepath.Join(homeDir, ".chainlaunch/bin/besu", b.opts.Version) + binDir := filepath.Join(b.configService.GetDataPath(), "bin/besu", b.opts.Version) if err := os.MkdirAll(binDir, 0755); err != nil { return fmt.Errorf("failed to create bin directory: %w", err) } @@ -390,18 +399,17 @@ func (b *LocalBesu) installBesuMacOS() error { return nil } +func (b *LocalBesu) getLogPath() string { + return b.GetStdOutPath() +} + // TailLogs tails the logs of the besu service func (b *LocalBesu) TailLogs(ctx context.Context, tail int, follow bool) (<-chan string, error) { logChan := make(chan string, 100) // Get log file path based on ID - homeDir, err := os.UserHomeDir() - if err != nil { - close(logChan) - return logChan, fmt.Errorf("failed to get home directory: %w", err) - } slugifiedID := strings.ReplaceAll(strings.ToLower(b.opts.ID), " ", "-") - logPath := filepath.Join(homeDir, ".chainlaunch/besu", slugifiedID, b.getServiceName()+".log") + logPath := filepath.Join(b.configService.GetDataPath(), "besu", slugifiedID, b.getServiceName()+".log") // Check if log file exists if _, err := os.Stat(logPath); os.IsNotExist(err) { diff --git a/pkg/nodes/besu/docker.go b/pkg/nodes/besu/docker.go index 6b2e194..9491dbb 100644 --- a/pkg/nodes/besu/docker.go +++ b/pkg/nodes/besu/docker.go @@ -81,7 +81,7 @@ func (b *LocalBesu) startDocker(env map[string]string, dataDir, configDir string // Create container config config := &container.Config{ Image: imageName, - Cmd: b.buildBesuCommand("/opt/besu/data", "/opt/besu/config"), + Cmd: b.buildDockerBesuCommand("/opt/besu/data", "/opt/besu/config"), Env: formatEnvForDocker(env), ExposedPorts: nat.PortSet{}, } @@ -188,3 +188,36 @@ func formatEnvForDocker(env map[string]string) []string { } return result } + +// buildBesuCommand builds the command arguments for Besu +func (b *LocalBesu) buildDockerBesuCommand(dataPath, configPath string) []string { + cmd := []string{ + "besu", + fmt.Sprintf("--network-id=%d", 
b.opts.ChainID), + fmt.Sprintf("--data-path=%s", dataPath), + fmt.Sprintf("--genesis-file=%s", filepath.Join(configPath, "genesis.json")), + "--rpc-http-enabled", + fmt.Sprintf("--rpc-http-port=%s", b.opts.RPCPort), + fmt.Sprintf("--p2p-port=%s", b.opts.P2PPort), + "--rpc-http-api=ADMIN,ETH,NET,PERM,QBFT,WEB3,TXPOOL", + "--host-allowlist=*", + "--miner-enabled", + fmt.Sprintf("--miner-coinbase=%s", b.opts.MinerAddress), + "--min-gas-price=1000000000", + "--rpc-http-cors-origins=all", + fmt.Sprintf("--node-private-key-file=%s", filepath.Join(configPath, "key")), + fmt.Sprintf("--p2p-host=%s", b.opts.ListenAddress), + "--rpc-http-host=0.0.0.0", + "--discovery-enabled=true", + "--sync-mode=FULL", + "--revert-reason-enabled=true", + "--validator-priority-enabled=true", + } + + // Add bootnodes if specified + if len(b.opts.BootNodes) > 0 { + cmd = append(cmd, fmt.Sprintf("--bootnodes=%s", strings.Join(b.opts.BootNodes, ","))) + } + + return cmd +} diff --git a/pkg/nodes/besu/service.go b/pkg/nodes/besu/service.go index b3eef05..2af486e 100644 --- a/pkg/nodes/besu/service.go +++ b/pkg/nodes/besu/service.go @@ -18,7 +18,7 @@ func (b *LocalBesu) getServiceName() string { // getLaunchdServiceName returns the launchd service name func (b *LocalBesu) getLaunchdServiceName() string { - return fmt.Sprintf("ai.chainlaunch.besu.%s", + return fmt.Sprintf("dev.chainlaunch.besu.%s", strings.ReplaceAll(strings.ToLower(b.opts.ID), " ", "-")) } @@ -34,9 +34,8 @@ func (b *LocalBesu) getLaunchdPlistPath() string { } // startService starts the besu as a system service -func (b *LocalBesu) startService(cmd string, env map[string]string, dirPath string) (*StartServiceResponse, error) { +func (b *LocalBesu) startService(cmd string, env map[string]string, dirPath, configDir string) (*StartServiceResponse, error) { // Write genesis file to config directory - configDir := filepath.Join(dirPath, "config") if err := os.MkdirAll(configDir, 0755); err != nil { return nil, fmt.Errorf("failed to 
create config directory: %w", err) } @@ -86,38 +85,6 @@ func (b *LocalBesu) startService(cmd string, env map[string]string, dirPath stri } } -// buildBesuCommand builds the command arguments for Besu -func (b *LocalBesu) buildBesuCommand(dataPath, configPath string) []string { - cmd := []string{ - "besu", - fmt.Sprintf("--data-path=%s", dataPath), - fmt.Sprintf("--genesis-file=%s", filepath.Join(configPath, "genesis.json")), - fmt.Sprintf("--network-id=%d", b.opts.NetworkID), - "--rpc-http-enabled", - fmt.Sprintf("--rpc-http-port=%s", b.opts.RPCPort), - fmt.Sprintf("--p2p-port=%s", b.opts.P2PPort), - "--rpc-http-api=ADMIN,ETH,NET,PERM,QBFT,WEB3", - "--host-allowlist=*", - "--miner-enabled", - fmt.Sprintf("--miner-coinbase=%s", b.opts.MinerAddress), - "--min-gas-price=1000000000", - "--sync-mode=FULL", - "--rpc-http-cors-origins=all", - fmt.Sprintf("--node-private-key-file=%s", filepath.Join(configPath, "key")), - fmt.Sprintf("--p2p-host=%s", b.opts.ListenAddress), - "--rpc-http-host=0.0.0.0", - "--discovery-enabled=true", - "--p2p-enabled=true", - } - - // Add bootnodes if specified - if len(b.opts.BootNodes) > 0 { - cmd = append(cmd, fmt.Sprintf("--bootnodes=%s", strings.Join(b.opts.BootNodes, ","))) - } - - return cmd -} - // createSystemdService creates a systemd service file func (b *LocalBesu) createSystemdService(cmd string, env map[string]string, dirPath, genesisPath, keyPath string) error { var envStrings []string @@ -125,12 +92,6 @@ func (b *LocalBesu) createSystemdService(cmd string, env map[string]string, dirP envStrings = append(envStrings, fmt.Sprintf("Environment=\"%s=%s\"", k, v)) } - // Build command using the common builder - cmdArgs := b.buildBesuCommand( - filepath.Join(dirPath, "data"), - filepath.Join(dirPath, "config"), - ) - tmpl := template.Must(template.New("systemd").Parse(` [Unit] Description=Hyperledger Besu Node - {{.ID}} @@ -158,7 +119,7 @@ WantedBy=multi-user.target }{ ID: b.opts.ID, DirPath: dirPath, - Cmd: strings.Join(cmdArgs, " 
"), + Cmd: cmd, EnvVars: envStrings, } @@ -182,10 +143,6 @@ func (b *LocalBesu) createLaunchdService(cmd string, env map[string]string, dirP } // Build command using the common builder - cmdArgs := b.buildBesuCommand( - filepath.Join(dirPath, "data"), - filepath.Join(dirPath, "config"), - ) tmpl := template.Must(template.New("launchd").Parse(` @@ -220,7 +177,7 @@ func (b *LocalBesu) createLaunchdService(cmd string, env map[string]string, dirP EnvVars []string }{ ServiceName: b.getLaunchdServiceName(), - Cmd: strings.Join(cmdArgs, " "), + Cmd: cmd, LogPath: filepath.Join(dirPath, b.getServiceName()+".log"), EnvVars: envStrings, } @@ -251,6 +208,10 @@ func (b *LocalBesu) startSystemdService() error { return b.execSystemctl("restart", b.getServiceName()) } +func (b *LocalBesu) GetStdOutPath() string { + return filepath.Join(b.configService.GetDataPath(), "besu", strings.ReplaceAll(strings.ToLower(b.opts.ID), " ", "-"), b.getServiceName()+".log") +} + // startLaunchdService starts the launchd service func (b *LocalBesu) startLaunchdService() error { cmd := exec.Command("launchctl", "load", b.getLaunchdPlistPath()) @@ -314,10 +275,24 @@ func (b *LocalBesu) stopLaunchdService() error { // execSystemctl executes a systemctl command func (b *LocalBesu) execSystemctl(command string, args ...string) error { - cmdArgs := append([]string{"systemctl", command}, args...) - cmd := exec.Command("sudo", cmdArgs...) - if err := cmd.Run(); err != nil { - return fmt.Errorf("systemctl %s failed: %w", command, err) + cmdArgs := append([]string{command}, args...) + + // Check if sudo is available + sudoPath, err := exec.LookPath("sudo") + if err == nil { + // sudo is available, use it + cmdArgs = append([]string{"systemctl"}, cmdArgs...) + cmd := exec.Command(sudoPath, cmdArgs...) + if err := cmd.Run(); err != nil { + return fmt.Errorf("systemctl %s failed: %w", command, err) + } + } else { + // sudo is not available, run directly + cmd := exec.Command("systemctl", cmdArgs...) 
+ if err := cmd.Run(); err != nil { + return fmt.Errorf("systemctl %s failed: %w", command, err) + } } + return nil } diff --git a/pkg/nodes/besu/types.go b/pkg/nodes/besu/types.go index 15218f5..406be4e 100644 --- a/pkg/nodes/besu/types.go +++ b/pkg/nodes/besu/types.go @@ -4,10 +4,13 @@ package besu type StartBesuOpts struct { ID string `json:"id"` ListenAddress string `json:"listenAddress"` + P2PHost string `json:"p2pHost"` P2PPort string `json:"p2pPort"` + RPCHost string `json:"rpcHost"` RPCPort string `json:"rpcPort"` ConsensusType string `json:"consensusType"` NetworkID int64 `json:"networkId"` + ChainID int64 `json:"chainId"` GenesisFile string `json:"genesisFile"` NodePrivateKey string `json:"nodePrivateKey"` MinerAddress string `json:"minerAddress"` diff --git a/pkg/nodes/http/handler.go b/pkg/nodes/http/handler.go index 4d059bb..eb23987 100644 --- a/pkg/nodes/http/handler.go +++ b/pkg/nodes/http/handler.go @@ -50,7 +50,7 @@ func (h *NodeHandler) RegisterRoutes(r chi.Router) { r.Get("/platform/{platform}", response.Middleware(h.ListNodesByPlatform)) r.Get("/defaults/fabric-peer", response.Middleware(h.GetFabricPeerDefaults)) r.Get("/defaults/fabric-orderer", response.Middleware(h.GetFabricOrdererDefaults)) - r.Get("/defaults/fabric", response.Middleware(h.GetNodesDefaults)) + r.Get("/defaults/fabric", response.Middleware(h.GetFabricNodesDefaults)) r.Get("/defaults/besu-node", response.Middleware(h.GetBesuNodeDefaults)) r.Get("/{id}", response.Middleware(h.GetNode)) r.Post("/{id}/start", response.Middleware(h.StartNode)) @@ -59,13 +59,16 @@ func (h *NodeHandler) RegisterRoutes(r chi.Router) { r.Delete("/{id}", response.Middleware(h.DeleteNode)) r.Get("/{id}/logs", h.TailLogs) r.Get("/{id}/events", response.Middleware(h.GetNodeEvents)) + r.Get("/{id}/channels", response.Middleware(h.GetNodeChannels)) + r.Post("/{id}/certificates/renew", response.Middleware(h.RenewCertificates)) + r.Put("/{id}", response.Middleware(h.UpdateNode)) }) } // CreateNode godoc // 
@Summary Create a new node // @Description Create a new node with the specified configuration -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param request body CreateNodeRequest true "Node creation request" @@ -115,7 +118,7 @@ func (h *NodeHandler) CreateNode(w http.ResponseWriter, r *http.Request) error { // GetNode godoc // @Summary Get a node // @Description Get a node by ID -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param id path int true "Node ID" @@ -146,7 +149,7 @@ func (h *NodeHandler) GetNode(w http.ResponseWriter, r *http.Request) error { // ListNodes godoc // @Summary List all nodes // @Description Get a paginated list of nodes with optional platform filter -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param platform query string false "Filter by blockchain platform" @@ -193,7 +196,7 @@ func (h *NodeHandler) ListNodes(w http.ResponseWriter, r *http.Request) error { // ListNodesByPlatform godoc // @Summary List nodes by platform // @Description Get a paginated list of nodes filtered by blockchain platform -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param platform path string true "Blockchain platform (FABRIC/BESU)" Enums(FABRIC,BESU) @@ -238,7 +241,7 @@ func (h *NodeHandler) ListNodesByPlatform(w http.ResponseWriter, r *http.Request // StartNode godoc // @Summary Start a node // @Description Start a node by ID -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param id path int true "Node ID" @@ -269,7 +272,7 @@ func (h *NodeHandler) StartNode(w http.ResponseWriter, r *http.Request) error { // StopNode godoc // @Summary Stop a node // @Description Stop a node by ID -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param id path int true "Node ID" @@ -300,7 +303,7 @@ func (h *NodeHandler) StopNode(w http.ResponseWriter, r *http.Request) error { // RestartNode godoc // @Summary Restart a node // @Description Restart a node by 
ID (stops and starts the node) -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param id path int true "Node ID" @@ -338,7 +341,7 @@ func (h *NodeHandler) RestartNode(w http.ResponseWriter, r *http.Request) error // DeleteNode godoc // @Summary Delete a node // @Description Delete a node by ID -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param id path int true "Node ID" @@ -368,7 +371,7 @@ func (h *NodeHandler) DeleteNode(w http.ResponseWriter, r *http.Request) error { // GetFabricPeerDefaults godoc // @Summary Get default values for Fabric peer node // @Description Get default configuration values for a Fabric peer node -// @Tags nodes +// @Tags Nodes // @Produce json // @Success 200 {object} service.NodeDefaults // @Failure 500 {object} response.ErrorResponse "Internal server error" @@ -381,7 +384,7 @@ func (h *NodeHandler) GetFabricPeerDefaults(w http.ResponseWriter, r *http.Reque // GetFabricOrdererDefaults godoc // @Summary Get default values for Fabric orderer node // @Description Get default configuration values for a Fabric orderer node -// @Tags nodes +// @Tags Nodes // @Produce json // @Success 200 {object} service.NodeDefaults // @Failure 500 {object} response.ErrorResponse "Internal server error" @@ -391,10 +394,10 @@ func (h *NodeHandler) GetFabricOrdererDefaults(w http.ResponseWriter, r *http.Re return response.WriteJSON(w, http.StatusOK, defaults) } -// GetNodesDefaults godoc +// GetFabricNodesDefaults godoc // @Summary Get default values for multiple Fabric nodes // @Description Get default configuration values for multiple Fabric nodes -// @Tags nodes +// @Tags Nodes // @Produce json // @Param peerCount query int false "Number of peer nodes" default(1) minimum(0) // @Param ordererCount query int false "Number of orderer nodes" default(1) minimum(0) @@ -403,7 +406,7 @@ func (h *NodeHandler) GetFabricOrdererDefaults(w http.ResponseWriter, r *http.Re // @Failure 400 {object} response.ErrorResponse 
"Validation error" // @Failure 500 {object} response.ErrorResponse "Internal server error" // @Router /nodes/defaults/fabric [get] -func (h *NodeHandler) GetNodesDefaults(w http.ResponseWriter, r *http.Request) error { +func (h *NodeHandler) GetFabricNodesDefaults(w http.ResponseWriter, r *http.Request) error { // Parse query parameters peerCount := 1 if countStr := r.URL.Query().Get("peerCount"); countStr != "" { @@ -431,7 +434,7 @@ func (h *NodeHandler) GetNodesDefaults(w http.ResponseWriter, r *http.Request) e }) } - result, err := h.service.GetNodesDefaults(service.NodesDefaultsParams{ + result, err := h.service.GetFabricNodesDefaults(service.NodesDefaultsParams{ PeerCount: peerCount, OrdererCount: ordererCount, Mode: mode, @@ -446,23 +449,38 @@ func (h *NodeHandler) GetNodesDefaults(w http.ResponseWriter, r *http.Request) e // GetBesuNodeDefaults godoc // @Summary Get default values for Besu node // @Description Get default configuration values for a Besu node -// @Tags nodes +// @Tags Nodes // @Produce json -// @Success 200 {object} service.BesuNodeDefaults +// @Param besuNodes query int false "Number of Besu nodes" default(1) minimum(0) +// @Success 200 {array} BesuNodeDefaultsResponse // @Failure 500 {object} response.ErrorResponse "Internal server error" // @Router /nodes/defaults/besu-node [get] func (h *NodeHandler) GetBesuNodeDefaults(w http.ResponseWriter, r *http.Request) error { - defaults, err := h.service.GetBesuNodeDefaults() + // Parse besuNodes parameter + besuNodes := 1 + if countStr := r.URL.Query().Get("besuNodes"); countStr != "" { + if count, err := strconv.Atoi(countStr); err == nil && count >= 0 { + besuNodes = count + } + } + + defaults, err := h.service.GetBesuNodeDefaults(besuNodes) if err != nil { return errors.NewInternalError("failed to get Besu node defaults", err, nil) } - return response.WriteJSON(w, http.StatusOK, defaults) + + res := BesuNodeDefaultsResponse{ + NodeCount: besuNodes, + Defaults: defaults, + } + + return 
response.WriteJSON(w, http.StatusOK, res) } // TailLogs godoc // @Summary Tail node logs // @Description Stream logs from a specific node -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce text/event-stream // @Param id path int true "Node ID" @@ -540,7 +558,7 @@ func (h *NodeHandler) TailLogs(w http.ResponseWriter, r *http.Request) { // GetNodeEvents godoc // @Summary Get node events // @Description Get a paginated list of events for a specific node -// @Tags nodes +// @Tags Nodes // @Accept json // @Produce json // @Param id path int true "Node ID" @@ -593,99 +611,68 @@ func (h *NodeHandler) GetNodeEvents(w http.ResponseWriter, r *http.Request) erro return response.WriteJSON(w, http.StatusOK, eventsResponse) } -// Mapping functions - -func mapHTTPToServiceFabricPeerConfig(config *types.FabricPeerConfig) *types.FabricPeerConfig { - if config == nil { - return nil - } - return &types.FabricPeerConfig{ - Name: config.Name, - OrganizationID: config.OrganizationID, - ExternalEndpoint: config.ExternalEndpoint, - ListenAddress: config.ListenAddress, - EventsAddress: config.EventsAddress, - OperationsListenAddress: config.OperationsListenAddress, - ChaincodeAddress: config.ChaincodeAddress, - DomainNames: config.DomainNames, - Env: config.Env, - MSPID: config.MSPID, +// GetNodeChannels godoc +// @Summary Get channels for a Fabric node +// @Description Retrieves all channels for a specific Fabric node +// @Tags Nodes +// @Accept json +// @Produce json +// @Param id path int true "Node ID" +// @Success 200 {object} NodeChannelsResponse +// @Failure 400 {object} response.ErrorResponse "Validation error" +// @Failure 404 {object} response.ErrorResponse "Node not found" +// @Failure 500 {object} response.ErrorResponse "Internal server error" +// @Router /nodes/{id}/channels [get] +func (h *NodeHandler) GetNodeChannels(w http.ResponseWriter, r *http.Request) error { + id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + return 
errors.NewValidationError("invalid node ID", map[string]interface{}{ + "error": err.Error(), + }) } -} -func mapHTTPToServiceFabricOrdererConfig(config *types.FabricOrdererConfig) *types.FabricOrdererConfig { - if config == nil { - return nil - } - return &types.FabricOrdererConfig{ - Name: config.Name, - OrganizationID: config.OrganizationID, - // Mode: "service", - ExternalEndpoint: config.ExternalEndpoint, - ListenAddress: config.ListenAddress, - AdminAddress: config.AdminAddress, - OperationsListenAddress: config.OperationsListenAddress, - DomainNames: config.DomainNames, - Env: config.Env, - MSPID: config.MSPID, + channels, err := h.service.GetNodeChannels(r.Context(), id) + if err != nil { + if err == service.ErrNotFound { + return errors.NewNotFoundError("node not found", nil) + } + if err == service.ErrInvalidNodeType { + return errors.NewValidationError("node is not a Fabric node", nil) + } + return errors.NewInternalError("failed to get node channels", err, nil) } -} -func mapHTTPToServiceBesuNodeConfig(config *types.BesuNodeConfig) *types.BesuNodeConfig { - if config == nil { - return nil - } - return &types.BesuNodeConfig{ - NetworkID: config.NetworkID, - P2PPort: config.P2PPort, - RPCPort: config.RPCPort, - BaseNodeConfig: types.BaseNodeConfig{ - Type: "besu", - Mode: config.Mode, - }, - KeyID: config.KeyID, - P2PHost: config.P2PHost, - RPCHost: config.RPCHost, - InternalIP: config.InternalIP, - ExternalIP: config.ExternalIP, - Env: config.Env, + channelsResponse := NodeChannelsResponse{ + NodeID: id, + Channels: make([]ChannelResponse, len(channels)), } -} -func mapServiceToHTTPFabricPeerDeploymentConfig(config *types.FabricPeerDeploymentConfig) *types.FabricPeerDeploymentConfig { - if config == nil { - return nil + + for i, channel := range channels { + channelsResponse.Channels[i] = toChannelResponse(channel) } - return config + + return response.WriteJSON(w, http.StatusOK, channelsResponse) } -func mapServiceToHTTPNodeResponse(node 
*service.NodeResponse) NodeResponse { - return NodeResponse{ - ID: node.ID, - Name: node.Name, - BlockchainPlatform: string(node.Platform), - NodeType: string(node.NodeType), - Status: string(node.Status), - Endpoint: node.Endpoint, - CreatedAt: node.CreatedAt, - UpdatedAt: node.UpdatedAt, - FabricPeer: node.FabricPeer, - FabricOrderer: node.FabricOrderer, - BesuNode: node.BesuNode, - } +// NodeChannelsResponse represents the response for node channels +type NodeChannelsResponse struct { + NodeID int64 `json:"nodeId"` + Channels []ChannelResponse `json:"channels"` } -func mapServiceToHTTPPaginatedResponse(nodes *service.PaginatedNodes) PaginatedNodesResponse { - items := make([]NodeResponse, len(nodes.Items)) - for i, node := range nodes.Items { - items[i] = mapServiceToHTTPNodeResponse(&node) - } +// ChannelResponse represents a Fabric channel in the response +type ChannelResponse struct { + Name string `json:"name"` + BlockNum int64 `json:"blockNum"` + CreatedAt time.Time `json:"createdAt,omitempty"` +} - return PaginatedNodesResponse{ - Items: items, - Total: nodes.Total, - Page: nodes.Page, - PageCount: nodes.PageCount, - HasNextPage: nodes.HasNextPage, +// Helper function to convert service channel to response channel +func toChannelResponse(channel service.Channel) ChannelResponse { + return ChannelResponse{ + Name: channel.Name, + BlockNum: channel.BlockNum, + CreatedAt: channel.CreatedAt, } } @@ -705,18 +692,6 @@ func toNodeResponse(node *service.NodeResponse) NodeResponse { } } -func isValidEventType(eventType service.NodeEventType) bool { - switch eventType { - case service.NodeEventStarting, - service.NodeEventStarted, - service.NodeEventStopping, - service.NodeEventStopped, - service.NodeEventError: - return true - } - return false -} - // Helper function to validate platform func isValidPlatform(platform types.BlockchainPlatform) bool { switch platform { @@ -735,3 +710,205 @@ func toNodeEventResponse(event service.NodeEvent) NodeEventResponse { 
CreatedAt: event.CreatedAt, } } + +// RenewCertificates godoc +// @Summary Renew node certificates +// @Description Renews the TLS and signing certificates for a Fabric node +// @Tags Nodes +// @Accept json +// @Produce json +// @Param id path int true "Node ID" +// @Success 200 {object} NodeResponse +// @Failure 400 {object} response.ErrorResponse "Validation error" +// @Failure 404 {object} response.ErrorResponse "Node not found" +// @Failure 500 {object} response.ErrorResponse "Internal server error" +// @Router /nodes/{id}/certificates/renew [post] +func (h *NodeHandler) RenewCertificates(w http.ResponseWriter, r *http.Request) error { + id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + return errors.NewValidationError("invalid node ID", map[string]interface{}{ + "error": err.Error(), + }) + } + + node, err := h.service.RenewCertificates(r.Context(), id) + if err != nil { + if errors.IsType(err, errors.NotFoundError) { + return errors.NewNotFoundError("node not found", nil) + } + return errors.NewInternalError("failed to renew certificates", err, nil) + } + + return response.WriteJSON(w, http.StatusOK, toNodeResponse(node)) +} + +// UpdateNode godoc +// @Summary Update a node +// @Description Updates an existing node's configuration based on its type +// @Tags Nodes +// @Accept json +// @Produce json +// @Param id path int true "Node ID" +// @Param request body UpdateNodeRequest true "Update node request" +// @Success 200 {object} NodeResponse +// @Failure 400 {object} response.ErrorResponse "Validation error" +// @Failure 404 {object} response.ErrorResponse "Node not found" +// @Failure 500 {object} response.ErrorResponse "Internal server error" +// @Router /nodes/{id} [put] +func (h *NodeHandler) UpdateNode(w http.ResponseWriter, r *http.Request) error { + nodeID, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64) + if err != nil { + return errors.NewValidationError("invalid node ID", map[string]interface{}{ + "error": 
err.Error(), + }) + } + + var req UpdateNodeRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return errors.NewValidationError("invalid request body", map[string]interface{}{ + "error": err.Error(), + }) + } + + // Get the node to determine its type + node, err := h.service.GetNode(r.Context(), nodeID) + if err != nil { + if errors.IsType(err, errors.NotFoundError) { + return errors.NewNotFoundError("node not found", nil) + } + return errors.NewInternalError("failed to get node", err, nil) + } + + switch node.NodeType { + case types.NodeTypeFabricPeer: + if req.FabricPeer == nil { + return errors.NewValidationError("fabricPeer configuration is required for Fabric peer nodes", nil) + } + return h.updateFabricPeer(w, r, nodeID, req.FabricPeer) + case types.NodeTypeFabricOrderer: + if req.FabricOrderer == nil { + return errors.NewValidationError("fabricOrderer configuration is required for Fabric orderer nodes", nil) + } + return h.updateFabricOrderer(w, r, nodeID, req.FabricOrderer) + case types.NodeTypeBesuFullnode: + if req.BesuNode == nil { + return errors.NewValidationError("besuNode configuration is required for Besu nodes", nil) + } + return h.updateBesuNode(w, r, nodeID, req.BesuNode) + default: + return errors.NewValidationError("unsupported node type", map[string]interface{}{ + "nodeType": node.NodeType, + }) + } +} + +// updateBesuNode handles updating a Besu node +func (h *NodeHandler) updateBesuNode(w http.ResponseWriter, r *http.Request, nodeID int64, req *UpdateBesuNodeRequest) error { + // Convert HTTP layer request to service layer request + serviceReq := service.UpdateBesuNodeRequest{ + NetworkID: req.NetworkID, + P2PHost: req.P2PHost, + P2PPort: req.P2PPort, + RPCHost: req.RPCHost, + RPCPort: req.RPCPort, + Bootnodes: req.Bootnodes, + ExternalIP: req.ExternalIP, + InternalIP: req.InternalIP, + Env: req.Env, + } + + // Call service layer to update the Besu node + updatedNode, err := h.service.UpdateBesuNode(r.Context(), nodeID, 
serviceReq) + if err != nil { + if errors.IsType(err, errors.ValidationError) { + return errors.NewValidationError("invalid besu node configuration", map[string]interface{}{ + "error": err.Error(), + }) + } + if errors.IsType(err, errors.NotFoundError) { + return errors.NewNotFoundError("node not found", nil) + } + return errors.NewInternalError("failed to update besu node", err, nil) + } + + // Return the updated node as response + return response.WriteJSON(w, http.StatusOK, toNodeResponse(updatedNode)) +} + +// updateFabricPeer handles updating a Fabric peer node +func (h *NodeHandler) updateFabricPeer(w http.ResponseWriter, r *http.Request, nodeID int64, req *UpdateFabricPeerRequest) error { + opts := service.UpdateFabricPeerOpts{ + NodeID: nodeID, + } + + if req.ExternalEndpoint != nil { + opts.ExternalEndpoint = *req.ExternalEndpoint + } + if req.ListenAddress != nil { + opts.ListenAddress = *req.ListenAddress + } + if req.EventsAddress != nil { + opts.EventsAddress = *req.EventsAddress + } + if req.OperationsListenAddress != nil { + opts.OperationsListenAddress = *req.OperationsListenAddress + } + if req.ChaincodeAddress != nil { + opts.ChaincodeAddress = *req.ChaincodeAddress + } + if req.DomainNames != nil { + opts.DomainNames = req.DomainNames + } + if req.Env != nil { + opts.Env = req.Env + } + if req.AddressOverrides != nil { + opts.AddressOverrides = req.AddressOverrides + } + if req.Version != nil { + opts.Version = *req.Version + } + + updatedNode, err := h.service.UpdateFabricPeer(r.Context(), opts) + if err != nil { + return errors.NewInternalError("failed to update peer", err, nil) + } + + return response.WriteJSON(w, http.StatusOK, toNodeResponse(updatedNode)) +} + +// updateFabricOrderer handles updating a Fabric orderer node +func (h *NodeHandler) updateFabricOrderer(w http.ResponseWriter, r *http.Request, nodeID int64, req *UpdateFabricOrdererRequest) error { + opts := service.UpdateFabricOrdererOpts{ + NodeID: nodeID, + } + + if 
req.ExternalEndpoint != nil { + opts.ExternalEndpoint = *req.ExternalEndpoint + } + if req.ListenAddress != nil { + opts.ListenAddress = *req.ListenAddress + } + if req.AdminAddress != nil { + opts.AdminAddress = *req.AdminAddress + } + if req.OperationsListenAddress != nil { + opts.OperationsListenAddress = *req.OperationsListenAddress + } + if req.DomainNames != nil { + opts.DomainNames = req.DomainNames + } + if req.Env != nil { + opts.Env = req.Env + } + if req.Version != nil { + opts.Version = *req.Version + } + + updatedNode, err := h.service.UpdateFabricOrderer(r.Context(), opts) + if err != nil { + return errors.NewInternalError("failed to update orderer", err, nil) + } + + return response.WriteJSON(w, http.StatusOK, toNodeResponse(updatedNode)) +} diff --git a/pkg/nodes/http/types.go b/pkg/nodes/http/types.go index bfb9eec..66d2af6 100644 --- a/pkg/nodes/http/types.go +++ b/pkg/nodes/http/types.go @@ -4,6 +4,7 @@ import ( "time" "github.com/chainlaunch/chainlaunch/pkg/nodes/service" + "github.com/chainlaunch/chainlaunch/pkg/nodes/types" ) // NodeType represents the type of node @@ -38,19 +39,20 @@ type BaseNodeConfig struct { // FabricPeerConfig represents the configuration for a Fabric peer node type FabricPeerConfig struct { BaseNodeConfig - Name string `json:"name" validate:"required"` - OrganizationID int64 `json:"organizationId" validate:"required"` - MSPID string `json:"mspId" validate:"required"` - SignKeyID int64 `json:"signKeyId" validate:"required"` - TLSKeyID int64 `json:"tlsKeyId" validate:"required"` - ExternalEndpoint string `json:"externalEndpoint" validate:"required"` - ListenAddress string `json:"listenAddress" validate:"required"` - EventsAddress string `json:"eventsAddress" validate:"required"` - OperationsListenAddress string `json:"operationsListenAddress" validate:"required"` - ChaincodeAddress string `json:"chaincodeAddress" validate:"required"` - DomainNames []string `json:"domainNames"` - Env map[string]string `json:"env"` - 
Version string `json:"version"` // Fabric version to use + Name string `json:"name" validate:"required"` + OrganizationID int64 `json:"organizationId" validate:"required"` + MSPID string `json:"mspId" validate:"required"` + SignKeyID int64 `json:"signKeyId" validate:"required"` + TLSKeyID int64 `json:"tlsKeyId" validate:"required"` + ExternalEndpoint string `json:"externalEndpoint" validate:"required"` + ListenAddress string `json:"listenAddress" validate:"required"` + EventsAddress string `json:"eventsAddress" validate:"required"` + OperationsListenAddress string `json:"operationsListenAddress" validate:"required"` + ChaincodeAddress string `json:"chaincodeAddress" validate:"required"` + DomainNames []string `json:"domainNames"` + Env map[string]string `json:"env"` + Version string `json:"version"` // Fabric version to use + AddressOverrides []types.AddressOverride `json:"addressOverrides,omitempty"` } // FabricOrdererConfig represents the configuration for a Fabric orderer node @@ -150,6 +152,7 @@ type NodeResponse struct { BlockchainPlatform string `json:"platform"` NodeType string `json:"nodeType"` Status string `json:"status"` + ErrorMessage string `json:"errorMessage"` Endpoint string `json:"endpoint"` CreatedAt time.Time `json:"createdAt"` UpdatedAt time.Time `json:"updatedAt"` @@ -166,3 +169,64 @@ type ListNodesResponse struct { PageCount int `json:"pageCount"` HasNextPage bool `json:"hasNextPage"` } + +// AddressOverride represents an address override configuration for Fabric nodes +type AddressOverride struct { + From string `json:"from"` + To string `json:"to"` + TLSCACert string `json:"tlsCACert"` +} + +// UpdateNodeRequest represents the request body for updating a node +type UpdateNodeRequest struct { + // Common fields + Name *string `json:"name,omitempty"` + BlockchainPlatform *types.BlockchainPlatform `json:"blockchainPlatform,omitempty"` + + // Platform-specific configurations + FabricPeer *UpdateFabricPeerRequest `json:"fabricPeer,omitempty"` + 
FabricOrderer *UpdateFabricOrdererRequest `json:"fabricOrderer,omitempty"` + BesuNode *UpdateBesuNodeRequest `json:"besuNode,omitempty"` +} + +// UpdateFabricPeerRequest represents the configuration for updating a Fabric peer node +type UpdateFabricPeerRequest struct { + ExternalEndpoint *string `json:"externalEndpoint,omitempty"` + ListenAddress *string `json:"listenAddress,omitempty"` + EventsAddress *string `json:"eventsAddress,omitempty"` + OperationsListenAddress *string `json:"operationsListenAddress,omitempty"` + ChaincodeAddress *string `json:"chaincodeAddress,omitempty"` + DomainNames []string `json:"domainNames,omitempty"` + Env map[string]string `json:"env,omitempty"` + AddressOverrides []types.AddressOverride `json:"addressOverrides,omitempty"` + Version *string `json:"version,omitempty"` +} + +// UpdateFabricOrdererRequest represents the configuration for updating a Fabric orderer node +type UpdateFabricOrdererRequest struct { + ExternalEndpoint *string `json:"externalEndpoint,omitempty"` + ListenAddress *string `json:"listenAddress,omitempty"` + AdminAddress *string `json:"adminAddress,omitempty"` + OperationsListenAddress *string `json:"operationsListenAddress,omitempty"` + DomainNames []string `json:"domainNames,omitempty"` + Env map[string]string `json:"env,omitempty"` + Version *string `json:"version,omitempty"` +} + +// UpdateBesuNodeRequest represents the configuration for updating a Besu node +type UpdateBesuNodeRequest struct { + NetworkID uint `json:"networkId" validate:"required"` + P2PHost string `json:"p2pHost" validate:"required"` + P2PPort uint `json:"p2pPort" validate:"required"` + RPCHost string `json:"rpcHost" validate:"required"` + RPCPort uint `json:"rpcPort" validate:"required"` + Bootnodes []string `json:"bootnodes,omitempty"` + ExternalIP string `json:"externalIp,omitempty"` + InternalIP string `json:"internalIp,omitempty"` + Env map[string]string `json:"env,omitempty"` +} + +type BesuNodeDefaultsResponse struct { + NodeCount int 
`json:"nodeCount"` + Defaults []service.BesuNodeDefaults `json:"defaults"` +} diff --git a/pkg/nodes/orderer/orderer.go b/pkg/nodes/orderer/orderer.go index 17abbea..c8aa8a5 100644 --- a/pkg/nodes/orderer/orderer.go +++ b/pkg/nodes/orderer/orderer.go @@ -6,11 +6,8 @@ import ( "context" "crypto/tls" "crypto/x509" - "encoding/json" "fmt" - "io" "net" - "net/http" "os" "os/exec" "path/filepath" @@ -20,27 +17,35 @@ import ( "time" "github.com/chainlaunch/chainlaunch/pkg/binaries" + "github.com/chainlaunch/chainlaunch/pkg/config" "github.com/chainlaunch/chainlaunch/pkg/db" fabricservice "github.com/chainlaunch/chainlaunch/pkg/fabric/service" kmodels "github.com/chainlaunch/chainlaunch/pkg/keymanagement/models" keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service" "github.com/chainlaunch/chainlaunch/pkg/logger" - "github.com/chainlaunch/chainlaunch/pkg/nodes/orderer/osnadmin" "github.com/chainlaunch/chainlaunch/pkg/nodes/types" + settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service" + "github.com/hyperledger/fabric-admin-sdk/pkg/channel" + "github.com/hyperledger/fabric-admin-sdk/pkg/identity" + "github.com/hyperledger/fabric-admin-sdk/pkg/network" + gwidentity "github.com/hyperledger/fabric-gateway/pkg/identity" + "google.golang.org/grpc" ) // LocalOrderer represents a local Fabric orderer node type LocalOrderer struct { - mspID string - db *db.Queries - opts StartOrdererOpts - mode string - org *fabricservice.OrganizationDTO - organizationID int64 - orgService *fabricservice.OrganizationService - keyService *keymanagement.KeyManagementService - nodeID int64 - logger *logger.Logger + mspID string + db *db.Queries + opts StartOrdererOpts + mode string + org *fabricservice.OrganizationDTO + organizationID int64 + orgService *fabricservice.OrganizationService + keyService *keymanagement.KeyManagementService + nodeID int64 + logger *logger.Logger + configService *config.ConfigService + settingsService 
*settingsservice.SettingsService } // NewLocalOrderer creates a new LocalOrderer instance @@ -55,18 +60,22 @@ func NewLocalOrderer( keyService *keymanagement.KeyManagementService, nodeID int64, logger *logger.Logger, + configService *config.ConfigService, + settingsService *settingsservice.SettingsService, ) *LocalOrderer { return &LocalOrderer{ - mspID: mspID, - db: db, - opts: opts, - mode: mode, - org: org, - organizationID: organizationID, - orgService: orgService, - keyService: keyService, - nodeID: nodeID, - logger: logger, + mspID: mspID, + db: db, + opts: opts, + mode: mode, + org: org, + organizationID: organizationID, + orgService: orgService, + keyService: keyService, + nodeID: nodeID, + logger: logger, + configService: configService, + settingsService: settingsService, } } @@ -77,7 +86,7 @@ func (o *LocalOrderer) getServiceName() string { // getLaunchdServiceName returns the launchd service name func (o *LocalOrderer) getLaunchdServiceName() string { - return fmt.Sprintf("ai.chainlaunch.orderer.%s.%s", + return fmt.Sprintf("dev.chainlaunch.orderer.%s.%s", strings.ToLower(o.org.MspID), strings.ReplaceAll(strings.ToLower(o.opts.ID), " ", "-")) } @@ -95,20 +104,15 @@ func (o *LocalOrderer) getLaunchdPlistPath() string { // GetStdOutPath returns the path to the stdout log file func (o *LocalOrderer) GetStdOutPath() string { - homeDir, _ := os.UserHomeDir() - dirPath := filepath.Join(homeDir, ".chainlaunch/orderers", + dirPath := filepath.Join(o.configService.GetDataPath(), "orderers", strings.ReplaceAll(strings.ToLower(o.opts.ID), " ", "-")) return filepath.Join(dirPath, o.getServiceName()+".log") } // findOrdererBinary finds the orderer binary in PATH func (o *LocalOrderer) findOrdererBinary() (string, error) { - homeDir, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("failed to get home directory: %w", err) - } - downloader, err := binaries.NewBinaryDownloader(homeDir) + downloader, err := binaries.NewBinaryDownloader(o.configService) 
if err != nil { return "", fmt.Errorf("failed to create binary downloader: %w", err) } @@ -120,12 +124,9 @@ func (o *LocalOrderer) findOrdererBinary() (string, error) { func (o *LocalOrderer) Start() (interface{}, error) { o.logger.Info("Starting orderer", "opts", o.opts) slugifiedID := strings.ReplaceAll(strings.ToLower(o.opts.ID), " ", "-") - homeDir, err := os.UserHomeDir() - if err != nil { - return nil, fmt.Errorf("failed to get home directory: %w", err) - } + chainlaunchDir := o.configService.GetDataPath() - dirPath := filepath.Join(homeDir, ".chainlaunch/orderers", slugifiedID) + dirPath := filepath.Join(chainlaunchDir, "orderers", slugifiedID) mspConfigPath := filepath.Join(dirPath, "config") dataConfigPath := filepath.Join(dirPath, "data") @@ -136,7 +137,7 @@ func (o *LocalOrderer) Start() (interface{}, error) { } // Build command and environment - cmd := fmt.Sprintf("%s", ordererBinary) + cmd := ordererBinary env := o.buildOrdererEnvironment(mspConfigPath) o.logger.Debug("Starting orderer", @@ -202,7 +203,7 @@ func (o *LocalOrderer) buildOrdererEnvironment(mspConfigPath string) map[string] env["ORDERER_GENERAL_TLS_CERTIFICATE"] = filepath.Join(mspConfigPath, "tls.crt") env["ORDERER_GENERAL_TLS_PRIVATEKEY"] = filepath.Join(mspConfigPath, "tls.key") env["ORDERER_GENERAL_TLS_ROOTCAS"] = filepath.Join(mspConfigPath, "tlscacerts/cacert.pem") - env["ORDERER_ADMIN_LISTENADDRESS"] = o.opts.AdminAddress + env["ORDERER_ADMIN_LISTENADDRESS"] = o.opts.AdminListenAddress env["ORDERER_GENERAL_LISTENADDRESS"] = strings.Split(o.opts.ListenAddress, ":")[0] env["ORDERER_OPERATIONS_LISTENADDRESS"] = o.opts.OperationsListenAddress env["ORDERER_GENERAL_LOCALMSPID"] = o.mspID @@ -221,6 +222,10 @@ func (o *LocalOrderer) buildOrdererEnvironment(mspConfigPath string) map[string] return env } +func (o *LocalOrderer) getLogPath() string { + return o.GetStdOutPath() +} + // TailLogs tails the logs of the orderer service func (o *LocalOrderer) TailLogs(ctx context.Context, tail int, 
follow bool) (<-chan string, error) { logChan := make(chan string, 100) @@ -429,13 +434,9 @@ func (o *LocalOrderer) Init() (interface{}, error) { } // Create directory structure - homeDir, err := os.UserHomeDir() - if err != nil { - return nil, fmt.Errorf("failed to get home directory: %w", err) - } slugifiedID := strings.ReplaceAll(strings.ToLower(o.opts.ID), " ", "-") - dirPath := filepath.Join(homeDir, ".chainlaunch", "orderers", slugifiedID) + dirPath := filepath.Join(o.configService.GetDataPath(), "orderers", slugifiedID) dataConfigPath := filepath.Join(dirPath, "data") mspConfigPath := filepath.Join(dirPath, "config") @@ -465,7 +466,7 @@ func (o *LocalOrderer) Init() (interface{}, error) { OrganizationID: o.organizationID, MSPID: o.mspID, ListenAddress: o.opts.ListenAddress, - AdminAddress: o.opts.AdminAddress, + AdminAddress: o.opts.AdminListenAddress, OperationsListenAddress: o.opts.OperationsListenAddress, ExternalEndpoint: o.opts.ExternalEndpoint, DomainNames: o.opts.DomainNames, @@ -900,7 +901,7 @@ Consensus: ListenAddress: strings.Split(o.opts.ListenAddress, ":")[0], ListenPort: strings.Split(o.opts.ListenAddress, ":")[1], OperationsListenAddress: o.opts.OperationsListenAddress, - AdminAddress: o.opts.AdminAddress, + AdminAddress: o.opts.AdminListenAddress, DataPath: dataConfigPath, MSPID: o.mspID, } @@ -950,27 +951,13 @@ func (o *LocalOrderer) JoinChannel(genesisBlock []byte) error { if !ok { return fmt.Errorf("couldn't append certs") } - ordererAdminUrl := fmt.Sprintf("https://%s", strings.Replace(o.opts.AdminAddress, "0.0.0.0", "127.0.0.1", 1)) - chResponse, err := osnadmin.Join(ordererAdminUrl, genesisBlock, certPool, adminTlsCertX509) - if err != nil { - return err - } - if chResponse.StatusCode == 405 { - return fmt.Errorf("orderer already joined the channel") - } - responseData, err := io.ReadAll(chResponse.Body) - if err != nil { - return err - } - if chResponse.StatusCode != 201 { - return fmt.Errorf("error joining orderer to channel: %d", 
chResponse.StatusCode) - } + ordererAdminUrl := fmt.Sprintf("https://%s", strings.Replace(o.opts.AdminListenAddress, "0.0.0.0", "127.0.0.1", 1)) - var response osnadmin.ChannelInfo - err = json.Unmarshal(responseData, &response) + channelInfo, err := channel.JoinOrderer(ordererAdminUrl, genesisBlock, certPool, adminTlsCertX509) if err != nil { - return err + return fmt.Errorf("failed to join orderer to channel: %w", err) } + o.logger.Info("Successfully joined orderer to channel", "orderer", o.opts.ID, "channel", channelInfo.Name) return nil } @@ -1023,19 +1010,313 @@ func (o *LocalOrderer) LeaveChannel(channelID string) error { if err != nil { return fmt.Errorf("failed to load client certificate: %w", err) } - adminAddress := strings.Replace(o.opts.AdminAddress, "0.0.0.0", "127.0.0.1", 1) + adminAddress := strings.Replace(o.opts.AdminListenAddress, "0.0.0.0", "127.0.0.1", 1) // Call osnadmin Remove API - resp, err := osnadmin.Remove(fmt.Sprintf("https://%s", adminAddress), channelID, caCertPool, cert) + err = channel.RemoveChannelFromOrderer(fmt.Sprintf("https://%s", adminAddress), channelID, caCertPool, cert) if err != nil { return fmt.Errorf("failed to remove orderer from channel: %w", err) } - defer resp.Body.Close() - if resp.StatusCode != http.StatusNoContent { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("failed to remove orderer from channel: status=%d, body=%s", resp.StatusCode, string(body)) + o.logger.Info("Successfully removed orderer from channel", "orderer", o.opts.ID, "channel", channelID) + return nil +} + +type OrdererChannel struct { + Name string `json:"name"` + BlockNum int64 `json:"blockNum"` + CreatedAt time.Time `json:"createdAt"` +} + +// GetOrdererAddress returns the orderer's external endpoint +func (o *LocalOrderer) GetOrdererAddress() string { + return o.opts.ExternalEndpoint +} + +// GetTLSRootCACert returns the TLS root CA certificate for the orderer +func (o *LocalOrderer) GetTLSRootCACert(ctx context.Context) (string, error) 
{ + org, err := o.orgService.GetOrganization(ctx, o.organizationID) + if err != nil { + return "", fmt.Errorf("failed to get organization: %w", err) + } + return org.TlsCertificate, nil +} + +// CreateOrdererConnection creates a gRPC connection to an orderer +func (o *LocalOrderer) CreateOrdererConnection(ctx context.Context, ordererUrl string, ordererTlsCACert string) (*grpc.ClientConn, error) { + o.logger.Debug("Creating orderer connection", "url", ordererUrl) + networkNode := network.Node{ + Addr: ordererUrl, + TLSCACertByte: []byte(ordererTlsCACert), + } + conn, err := network.DialConnection(networkNode) + if err != nil { + return nil, fmt.Errorf("failed to create orderer connection: %w", err) } + return conn, nil +} - o.logger.Info("Successfully removed orderer from channel", "orderer", o.opts.ID, "channel", channelID) +// GetAdminIdentity returns the admin identity for the orderer +func (o *LocalOrderer) GetAdminIdentity(ctx context.Context) (identity.SigningIdentity, error) { + org, err := o.orgService.GetOrganization(ctx, o.organizationID) + if err != nil { + return nil, fmt.Errorf("failed to get organization: %w", err) + } + + // Get admin signing key + adminSignKeyDB, err := o.keyService.GetKey(ctx, int(org.AdminSignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to get admin signing key: %w", err) + } + adminSignCert := adminSignKeyDB.Certificate + if adminSignCert == nil { + return nil, fmt.Errorf("admin signing certificate is nil") + } + + // Get private key from key management service + privateKeyPEM, err := o.keyService.GetDecryptedPrivateKey(int(org.AdminSignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to get private key: %w", err) + } + + cert, err := gwidentity.CertificateFromPEM([]byte(*adminSignCert)) + if err != nil { + return nil, fmt.Errorf("failed to read certificate: %w", err) + } + + privateKey, err := gwidentity.PrivateKeyFromPEM([]byte(privateKeyPEM)) + if err != nil { + return nil, fmt.Errorf("failed 
to read private key: %w", err) + } + + id, err := identity.NewPrivateKeySigningIdentity(org.MspID, cert, privateKey) + if err != nil { + return nil, fmt.Errorf("failed to create identity: %w", err) + } + + return id, nil +} + +// GetChannels returns a list of channels the orderer is participating in +func (o *LocalOrderer) GetChannels(ctx context.Context) ([]OrdererChannel, error) { + // Get organization + org, err := o.orgService.GetOrganization(ctx, o.organizationID) + if err != nil { + return nil, fmt.Errorf("failed to get organization: %w", err) + } + + // Get admin TLS credentials + adminTlsKeyDB, err := o.keyService.GetKey(ctx, int(org.AdminTlsKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to get admin TLS key: %w", err) + } + adminTlsCert := adminTlsKeyDB.Certificate + if adminTlsCert == nil { + return nil, fmt.Errorf("admin TLS certificate is nil") + } + if *adminTlsCert == "" { + return nil, fmt.Errorf("admin TLS certificate is empty") + } + adminTlsPK, err := o.keyService.GetDecryptedPrivateKey(int(org.AdminTlsKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to get admin TLS private key: %w", err) + } + + // Create client certificate + cert, err := tls.X509KeyPair([]byte(*adminTlsCert), []byte(adminTlsPK)) + if err != nil { + return nil, fmt.Errorf("failed to load client certificate: %w", err) + } + + // Create CA cert pool + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM([]byte(org.TlsCertificate)) + if !ok { + return nil, fmt.Errorf("failed to append TLS root certificate to CA cert pool") + } + + // Call osnadmin List API + adminAddress := strings.Replace(o.opts.AdminListenAddress, "0.0.0.0", "127.0.0.1", 1) + channelList, err := channel.ListChannel(fmt.Sprintf("https://%s", adminAddress), certPool, cert) + if err != nil { + return nil, fmt.Errorf("failed to list channels: %w", err) + } + + // Convert to service.Channel format + var channels []OrdererChannel + for _, ch := range channelList.Channels 
{ + blockInfo, err := channel.ListSingleChannel(fmt.Sprintf("https://%s", adminAddress), ch.Name, certPool, cert) + if err != nil { + return nil, fmt.Errorf("failed to get block height for channel: %w", err) + } + channels = append(channels, OrdererChannel{ + Name: ch.Name, + BlockNum: int64(blockInfo.Height), + CreatedAt: time.Now(), // We don't have the actual creation time + }) + } + + return channels, nil +} + +// RenewCertificates renews the orderer's TLS and signing certificates +func (o *LocalOrderer) RenewCertificates(ordererDeploymentConfig *types.FabricOrdererDeploymentConfig) error { + ctx := context.Background() + o.logger.Info("Starting certificate renewal for orderer", "ordererID", o.opts.ID) + + // Stop the orderer before renewing certificates + if err := o.Stop(); err != nil { + return fmt.Errorf("failed to stop orderer before certificate renewal: %w", err) + } + o.logger.Info("Successfully stopped orderer before certificate renewal") + + // Get organization details + org, err := o.orgService.GetOrganization(ctx, o.organizationID) + if err != nil { + return fmt.Errorf("failed to get organization: %w", err) + } + + if ordererDeploymentConfig.SignKeyID == 0 || ordererDeploymentConfig.TLSKeyID == 0 { + return fmt.Errorf("orderer node does not have required key IDs") + } + + // Get the CA certificates + signCAKey, err := o.keyService.GetKey(ctx, int(org.SignKeyID.Int64)) + if err != nil { + return fmt.Errorf("failed to get sign CA key: %w", err) + } + + tlsCAKey, err := o.keyService.GetKey(ctx, int(org.TlsRootKeyID.Int64)) + if err != nil { + return fmt.Errorf("failed to get TLS CA key: %w", err) + } + + // In case the sign key is not signed by the CA, set the signing key ID to the CA key ID + signKeyDB, err := o.keyService.GetKey(ctx, int(ordererDeploymentConfig.SignKeyID)) + if err != nil { + return fmt.Errorf("failed to get sign private key: %w", err) + } + if signKeyDB.SigningKeyID == nil || *signKeyDB.SigningKeyID == 0 { + // Set the signing key ID 
to the organization's sign CA key ID + err = o.keyService.SetSigningKeyIDForKey(ctx, int(ordererDeploymentConfig.SignKeyID), int(signCAKey.ID)) + if err != nil { + return fmt.Errorf("failed to set signing key ID for sign key: %w", err) + } + } + + tlsKeyDB, err := o.keyService.GetKey(ctx, int(ordererDeploymentConfig.TLSKeyID)) + if err != nil { + return fmt.Errorf("failed to get TLS private key: %w", err) + } + + if tlsKeyDB.SigningKeyID == nil || *tlsKeyDB.SigningKeyID == 0 { + // Set the signing key ID to the organization's sign CA key ID + err = o.keyService.SetSigningKeyIDForKey(ctx, int(ordererDeploymentConfig.TLSKeyID), int(tlsCAKey.ID)) + if err != nil { + return fmt.Errorf("failed to set signing key ID for TLS key: %w", err) + } + } + + // Renew signing certificate + validFor := kmodels.Duration(time.Hour * 24 * 365) // 1 year validity + _, err = o.keyService.RenewCertificate(ctx, int(ordererDeploymentConfig.SignKeyID), kmodels.CertificateRequest{ + CommonName: o.opts.ID, + Organization: []string{org.MspID}, + OrganizationalUnit: []string{"orderer"}, + DNSNames: []string{o.opts.ID}, + IsCA: false, + ValidFor: validFor, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) + if err != nil { + return fmt.Errorf("failed to renew signing certificate: %w", err) + } + + // Renew TLS certificate + domainNames := o.opts.DomainNames + var ipAddresses []net.IP + var domains []string + + // Ensure localhost and 127.0.0.1 are included + hasLocalhost := false + hasLoopback := false + for _, domain := range domainNames { + if domain == "localhost" { + hasLocalhost = true + domains = append(domains, domain) + continue + } + if domain == "127.0.0.1" { + hasLoopback = true + ipAddresses = append(ipAddresses, net.ParseIP(domain)) + continue + } + if ip := net.ParseIP(domain); ip != nil { + ipAddresses = append(ipAddresses, ip) + } else { + domains = append(domains, domain) + } + } + if !hasLocalhost { + domains = 
append(domains, "localhost") + } + if !hasLoopback { + ipAddresses = append(ipAddresses, net.ParseIP("127.0.0.1")) + } + + _, err = o.keyService.RenewCertificate(ctx, int(ordererDeploymentConfig.TLSKeyID), kmodels.CertificateRequest{ + CommonName: o.opts.ID, + Organization: []string{org.MspID}, + OrganizationalUnit: []string{"orderer"}, + DNSNames: domains, + IPAddresses: ipAddresses, + IsCA: false, + ValidFor: validFor, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) + if err != nil { + return fmt.Errorf("failed to renew TLS certificate: %w", err) + } + + // Get the private keys + signKey, err := o.keyService.GetDecryptedPrivateKey(int(ordererDeploymentConfig.SignKeyID)) + if err != nil { + return fmt.Errorf("failed to get sign private key: %w", err) + } + + tlsKey, err := o.keyService.GetDecryptedPrivateKey(int(ordererDeploymentConfig.TLSKeyID)) + if err != nil { + return fmt.Errorf("failed to get TLS private key: %w", err) + } + + // Update the certificates in the MSP directory + slugifiedID := strings.ReplaceAll(strings.ToLower(o.opts.ID), " ", "-") + dirPath := filepath.Join(o.configService.GetDataPath(), "orderers", slugifiedID) + mspConfigPath := filepath.Join(dirPath, "config") + + err = o.writeCertificatesAndKeys( + mspConfigPath, + tlsKeyDB, + signKeyDB, + tlsKey, + signKey, + signCAKey, + tlsCAKey, + ) + if err != nil { + return fmt.Errorf("failed to write renewed certificates: %w", err) + } + + o.logger.Info("Successfully renewed orderer certificates", "ordererID", o.opts.ID) + o.logger.Info("Starting orderer after certificate renewal") + + // Start the orderer with renewed certificates + _, err = o.Start() + if err != nil { + return fmt.Errorf("failed to start orderer after certificate renewal: %w", err) + } + + o.logger.Info("Successfully started orderer after certificate renewal") return nil } diff --git a/pkg/nodes/orderer/service.go b/pkg/nodes/orderer/service.go index 2ef9b9c..dddbf08 100644 
--- a/pkg/nodes/orderer/service.go +++ b/pkg/nodes/orderer/service.go @@ -229,11 +229,25 @@ func (o *LocalOrderer) stopLaunchdService() error { // execSystemctl executes a systemctl command func (o *LocalOrderer) execSystemctl(command string, args ...string) error { - cmdArgs := append([]string{"systemctl", command}, args...) - cmd := exec.Command("sudo", cmdArgs...) - if err := cmd.Run(); err != nil { - return fmt.Errorf("systemctl %s failed: %w", command, err) + cmdArgs := append([]string{command}, args...) + + // Check if sudo is available + sudoPath, err := exec.LookPath("sudo") + if err == nil { + // sudo is available, use it + cmdArgs = append([]string{"systemctl"}, cmdArgs...) + cmd := exec.Command(sudoPath, cmdArgs...) + if err := cmd.Run(); err != nil { + return fmt.Errorf("systemctl %s failed: %w", command, err) + } + } else { + // sudo is not available, run directly + cmd := exec.Command("systemctl", cmdArgs...) + if err := cmd.Run(); err != nil { + return fmt.Errorf("systemctl %s failed: %w", command, err) + } } + return nil } diff --git a/pkg/nodes/orderer/types.go b/pkg/nodes/orderer/types.go index ea3d190..541a00d 100644 --- a/pkg/nodes/orderer/types.go +++ b/pkg/nodes/orderer/types.go @@ -1,15 +1,25 @@ package orderer +import "github.com/chainlaunch/chainlaunch/pkg/nodes/types" + // StartOrdererOpts represents the options for starting an orderer type StartOrdererOpts struct { - ID string `json:"id"` - ListenAddress string `json:"listenAddress"` - AdminAddress string `json:"adminAddress"` - OperationsListenAddress string `json:"operationsListenAddress"` - ExternalEndpoint string `json:"externalEndpoint"` - DomainNames []string `json:"domainNames"` - Env map[string]string `json:"env"` - Version string `json:"version"` // Fabric version to use + ID string `json:"id"` + ListenAddress string `json:"listenAddress"` + OperationsListenAddress string `json:"operationsListenAddress"` + AdminListenAddress string `json:"adminListenAddress"` + ExternalEndpoint 
string `json:"externalEndpoint"` + DomainNames []string `json:"domainNames"` + Env map[string]string `json:"env"` + Version string `json:"version"` // Fabric version to use + AddressOverrides []types.AddressOverride `json:"addressOverrides,omitempty"` +} + +// AddressOverride represents an address override configuration +type AddressOverride struct { + From string `json:"from"` + To string `json:"to"` + TLSCACert string `json:"tlsCACert"` } // OrdererConfig represents the configuration for an orderer node @@ -17,7 +27,7 @@ type OrdererConfig struct { Mode string `json:"mode"` ListenAddress string `json:"listenAddress"` OperationsListenAddress string `json:"operationsListenAddress"` - AdminAddress string `json:"adminAddress"` + AdminListenAddress string `json:"adminListenAddress"` ExternalEndpoint string `json:"externalEndpoint"` SignCert string `json:"signCert"` SignCACert string `json:"signCACert"` @@ -40,3 +50,10 @@ type StartDockerResponse struct { Mode string `json:"mode"` ContainerName string `json:"containerName"` } + +// BlockInfo represents information about a block in the orderer +type BlockInfo struct { + Height uint64 `json:"height"` + CurrentBlockHash string `json:"currentBlockHash"` + PreviousBlockHash string `json:"previousBlockHash"` +} diff --git a/pkg/nodes/peer/peer.go b/pkg/nodes/peer/peer.go index 828f87c..fd9c8aa 100644 --- a/pkg/nodes/peer/peer.go +++ b/pkg/nodes/peer/peer.go @@ -4,7 +4,9 @@ import ( "bufio" "bytes" "context" + "crypto/tls" "crypto/x509" + "errors" "fmt" "net" "os" @@ -18,1735 +20,1930 @@ import ( // add sprig/v3 "github.com/Masterminds/sprig/v3" "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric-protos-go/common" - cb "github.com/hyperledger/fabric-protos-go/common" - "github.com/hyperledger/fabric-sdk-go/pkg/client/resmgmt" - "github.com/hyperledger/fabric-sdk-go/pkg/common/providers/msp" - "github.com/hyperledger/fabric-sdk-go/pkg/core/config" - "github.com/hyperledger/fabric-sdk-go/pkg/core/cryptosuite" - 
"github.com/hyperledger/fabric-sdk-go/pkg/core/cryptosuite/bccsp/sw" - "github.com/hyperledger/fabric-sdk-go/pkg/fab" - "github.com/hyperledger/fabric-sdk-go/pkg/fab/resource" - "github.com/hyperledger/fabric-sdk-go/pkg/fabsdk" - mspimpl "github.com/hyperledger/fabric-sdk-go/pkg/msp" - + "github.com/hyperledger/fabric-admin-sdk/pkg/channel" + "github.com/hyperledger/fabric-admin-sdk/pkg/identity" + "github.com/hyperledger/fabric-admin-sdk/pkg/network" + "github.com/hyperledger/fabric-gateway/pkg/client" + gwidentity "github.com/hyperledger/fabric-gateway/pkg/identity" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/orderer" + "google.golang.org/grpc" + + "github.com/chainlaunch/chainlaunch/internal/protoutil" "github.com/chainlaunch/chainlaunch/pkg/binaries" + "github.com/chainlaunch/chainlaunch/pkg/config" "github.com/chainlaunch/chainlaunch/pkg/db" fabricservice "github.com/chainlaunch/chainlaunch/pkg/fabric/service" kmodels "github.com/chainlaunch/chainlaunch/pkg/keymanagement/models" keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service" "github.com/chainlaunch/chainlaunch/pkg/logger" "github.com/chainlaunch/chainlaunch/pkg/nodes/types" + settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service" ) -// LocalPeer represents a local Fabric peer node -type LocalPeer struct { - mspID string - db *db.Queries - opts StartPeerOpts - mode string - org *fabricservice.OrganizationDTO - organizationID int64 - orgService *fabricservice.OrganizationService - keyService *keymanagement.KeyManagementService - nodeID int64 - logger *logger.Logger -} - -// NewLocalPeer creates a new LocalPeer instance -func NewLocalPeer( - mspID string, - db *db.Queries, - opts StartPeerOpts, - mode string, - org *fabricservice.OrganizationDTO, - organizationID int64, - orgService *fabricservice.OrganizationService, - keyService *keymanagement.KeyManagementService, - nodeID int64, - logger 
*logger.Logger, -) *LocalPeer { - return &LocalPeer{ - mspID: mspID, - db: db, - opts: opts, - mode: mode, - org: org, - organizationID: organizationID, - orgService: orgService, - keyService: keyService, - nodeID: nodeID, - logger: logger, - } -} - -// getServiceName returns the systemd service name -func (p *LocalPeer) getServiceName() string { - return fmt.Sprintf("fabric-peer-%s", strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")) -} - -// getLaunchdServiceName returns the launchd service name -func (p *LocalPeer) getLaunchdServiceName() string { - return fmt.Sprintf("ai.chainlaunch.peer.%s.%s", - strings.ToLower(p.org.MspID), - strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")) -} - -// getServiceFilePath returns the systemd service file path -func (p *LocalPeer) getServiceFilePath() string { - return fmt.Sprintf("/etc/systemd/system/%s.service", p.getServiceName()) -} - -// getLaunchdPlistPath returns the launchd plist file path -func (p *LocalPeer) getLaunchdPlistPath() string { - homeDir, _ := os.UserHomeDir() - return filepath.Join(homeDir, "Library/LaunchAgents", p.getLaunchdServiceName()+".plist") +type AddressOverridePath struct { + From string + To string + TLSCAPath string } -// GetStdOutPath returns the path to the stdout log file -func (p *LocalPeer) GetStdOutPath() string { - homeDir, _ := os.UserHomeDir() - dirPath := filepath.Join(homeDir, ".chainlaunch/peers", - strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")) - return filepath.Join(dirPath, p.getServiceName()+".log") -} +const coreYamlTemplate = ` +# Copyright IBM Corp. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +# -func (p *LocalPeer) getPeerPath() string { - homeDir, _ := os.UserHomeDir() - return filepath.Join(homeDir, ".chainlaunch/peers", - strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")) -} +############################################################################### +# +# Peer section +# +############################################################################### +peer: -// getContainerName returns the docker container name -func (p *LocalPeer) getContainerName() (string, error) { - org, err := p.orgService.GetOrganization(context.Background(), p.organizationID) - if err != nil { - return "", fmt.Errorf("failed to get organization: %w", err) - } - return fmt.Sprintf("%s-%s", - strings.ToLower(org.MspID), - strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")), nil -} + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe -// findPeerBinary finds the peer binary in PATH -func (p *LocalPeer) findPeerBinary() (string, error) { - homeDir, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("failed to get home directory: %w", err) - } + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev - downloader, err := binaries.NewBinaryDownloader(homeDir) - if err != nil { - return "", fmt.Errorf("failed to create binary downloader: %w", err) - } + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 - return downloader.GetBinaryPath(binaries.PeerBinary, p.opts.Version) -} + # The endpoint this peer uses to listen for inbound chaincode connections. 
+ # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + # chaincodeListenAddress: 0.0.0.0:7052 -// Init initializes the peer configuration -func (p *LocalPeer) Init() (types.NodeDeploymentConfig, error) { - ctx := context.Background() - // Get node from database - node, err := p.db.GetNode(ctx, p.nodeID) - if err != nil { - return nil, fmt.Errorf("failed to get node: %w", err) - } + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer address (see below). If specified peer address is invalid then it + # will fallback to the auto detected IP (local IP) regardless of the peer + # addressAutoDetect value. + # chaincodeAddress: 0.0.0.0:7052 - p.logger.Info("Initializing peer", - "opts", p.opts, - "node", node, - "orgID", p.organizationID, - "nodeID", p.nodeID, - ) + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7051 - // Get organization - org, err := p.orgService.GetOrganization(ctx, p.organizationID) - if err != nil { - return nil, fmt.Errorf("failed to get organization: %w", err) - } + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + # When set to true, will override peer address. 
+ addressAutoDetect: false - signCAKeyDB, err := p.keyService.GetKey(ctx, int(org.SignKeyID.Int64)) - if err != nil { - return nil, fmt.Errorf("failed to retrieve sign CA cert: %w", err) - } + # Keepalive settings for peer server and clients + keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. 
+ interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s - tlsCAKeyDB, err := p.keyService.GetKey(ctx, int(org.TlsRootKeyID.Int64)) - if err != nil { - return nil, fmt.Errorf("failed to retrieve TLS CA cert: %w", err) - } - isCA := 0 - description := "Sign key for " + p.opts.ID - curveP256 := kmodels.ECCurveP256 - providerID := 1 - // Create Sign Key - signKeyDB, err := p.keyService.CreateKey(ctx, kmodels.CreateKeyRequest{ - Algorithm: kmodels.KeyAlgorithmEC, - Name: p.opts.ID, - IsCA: &isCA, - Description: &description, - Curve: &curveP256, - ProviderID: &providerID, - }, int(org.SignKeyID.Int64)) - if err != nil { - return nil, fmt.Errorf("failed to create sign key: %w", err) - } + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: 127.0.0.1:7051 - // Sign Sign Key - signKeyDB, err = p.keyService.SignCertificate(ctx, signKeyDB.ID, signCAKeyDB.ID, kmodels.CertificateRequest{ - CommonName: p.opts.ID, - Organization: []string{org.MspID}, - OrganizationalUnit: []string{"peer"}, - DNSNames: []string{p.opts.ID}, - IsCA: true, - KeyUsage: x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - }) - if err != nil { - return nil, fmt.Errorf("failed to sign sign key: %w", err) - } + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. 
- signKey, err := p.keyService.GetDecryptedPrivateKey(int(signKeyDB.ID)) - if err != nil { - return nil, fmt.Errorf("failed to get sign private key: %w", err) - } + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. + useLeaderElection: false + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization. Multiple peers or all peers in an organization + # may be configured as org leaders, so that they all pull + # blocks directly from ordering service. + orgLeader: true - // Create TLS key - tlsKeyDB, err := p.keyService.CreateKey(ctx, kmodels.CreateKeyRequest{ - Algorithm: kmodels.KeyAlgorithmEC, - Name: p.opts.ID, - IsCA: &isCA, - Description: &description, - Curve: &curveP256, - ProviderID: &providerID, - }, int(org.SignKeyID.Int64)) - if err != nil { - return nil, fmt.Errorf("failed to create sign key: %w", err) - } - domainNames := p.opts.DomainNames + # Interval for membershipTracker polling + membershipTrackerInterval: 5s - // Ensure localhost and 127.0.0.1 are included in domain names - hasLocalhost := false - hasLoopback := false - var ipAddresses []net.IP - var domains []string - for _, domain := range domainNames { - if domain == "localhost" { - hasLocalhost = true - domains = append(domains, domain) - continue - } - if domain == "127.0.0.1" { - hasLoopback = true - ipAddresses = append(ipAddresses, net.ParseIP(domain)) - continue - } - if ip := net.ParseIP(domain); ip != nil { - ipAddresses = append(ipAddresses, ip) - } else { - domains = append(domains, domain) - } - } - if !hasLocalhost { - domains = append(domains, "localhost") - } - if !hasLoopback { - ipAddresses = append(ipAddresses, net.ParseIP("127.0.0.1")) - } - 
p.opts.DomainNames = domains + # Overrides the endpoint that the peer publishes to peers + # in its organization. For peers in foreign organizations + # see 'externalEndpoint' + endpoint: + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: false + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: second) + aliveTimeInterval: 5s + # 
Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # Max number of attempts to connect to a peer + maxConnectionAttempts: 120 + # Message expiration factor for alive messages + msgExpirationFactor: 20 + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. + externalEndpoint: + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s - // Sign TLS certificates - validFor := kmodels.Duration(time.Hour * 24 * 365) - tlsKeyDB, err = p.keyService.SignCertificate(ctx, tlsKeyDB.ID, tlsCAKeyDB.ID, kmodels.CertificateRequest{ - CommonName: p.opts.ID, - Organization: []string{org.MspID}, - OrganizationalUnit: []string{"peer"}, - DNSNames: domains, - IPAddresses: ipAddresses, - IsCA: true, - ValidFor: validFor, - KeyUsage: x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - }) - if err != nil { - return nil, fmt.Errorf("failed to sign TLS certificate: %w", err) - } - tlsKey, err := p.keyService.GetDecryptedPrivateKey(int(tlsKeyDB.ID)) - if err != nil { - return nil, fmt.Errorf("failed to get TLS private key: %w", err) - } - // Create directory structure - homeDir, err := os.UserHomeDir() - if err != nil { - return nil, fmt.Errorf("failed to get home directory: %w", err) - } + pvtData: + # pullRetryThreshold determines the maximum duration of time private data 
corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + pullRetryThreshold: 60s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. + # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. 
+ reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + # implicitCollectionDisseminationPolicy specifies the dissemination policy for the peer's own implicit collection. + # When a peer endorses a proposal that writes to its own implicit collection, below values override the default values + # for disseminating private data. + # Note that it is applicable to all channels the peer has joined. The implication is that requiredPeerCount has to + # be smaller than the number of peers in a channel that has the lowest numbers of peers from the organization. + implicitCollectionDisseminationPolicy: + # requiredPeerCount defines the minimum number of eligible peers to which the peer must successfully + # disseminate private data for its own implicit collection during endorsement. Default value is 0. + requiredPeerCount: 0 + # maxPeerCount defines the maximum number of eligible peers to which the peer will attempt to + # disseminate private data for its own implicit collection during endorsement. Default value is 1. + maxPeerCount: 1 - slugifiedID := strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-") - dirPath := filepath.Join(homeDir, ".chainlaunch", "peers", slugifiedID) - dataConfigPath := filepath.Join(dirPath, "data") - mspConfigPath := filepath.Join(dirPath, "config") + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + enabled: false + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. 
+ checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. The actual buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. + # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). 
This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: {{.DataPath}} + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: + # Token Label + Label: + # User PIN + Pin: + Hash: + Security: + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. 
+ localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries + reConnectBackoffThreshold: 3600s + + # A list of orderer endpoint addresses which should be overridden + # when found in channel configurations. +{{- if .AddressOverrides }} + addressOverrides: +{{- range $i, $override := .AddressOverrides }} + - from: {{ $override.From }} + to: {{ $override.To }} + caCertsFile: {{ $override.TLSCAPath }} +{{- end }} +{{- else }} + addressOverrides: [] +{{- end }} + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. 
For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. 
+ authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: false + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running requests to a service on each peer. + # Currently this option is only applied to endorser service and deliver service. + # When the property is missing or the value is 0, the concurrency limit is disabled for the service. + concurrency: + # endorserService limits concurrent requests to endorser service that handles chaincode deployment, query and invocation, + # including both user chaincodes and system chaincodes. + endorserService: 2500 + # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. + deliverService: 2500 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. 
For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + endpoint: "" - // Create directories - if err := os.MkdirAll(dataConfigPath, 0755); err != nil { - return nil, fmt.Errorf("failed to create data directory: %w", err) - } - if err := os.MkdirAll(mspConfigPath, 0755); err != nil { - return nil, fmt.Errorf("failed to create msp directory: %w", err) - } + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key - // Write certificates and keys - if err := p.writeCertificatesAndKeys(mspConfigPath, tlsKeyDB, signKeyDB, tlsKey, signKey, signCAKeyDB, tlsCAKeyDB); err != nil { - return nil, fmt.Errorf("failed to write certificates and keys: %w", err) - } + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false - // Create external builders - if err := p.setupExternalBuilders(mspConfigPath); err != nil { - return nil, fmt.Errorf("failed to setup external builders: %w", err) - } + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # Dns - a list of DNS servers for the container to use. + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. 
+ hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + LogConfig: + Type: json-file + Config: + max-size: "50m" + max-file: "5" + Memory: 2147483648 - // Write config files - if err := p.writeConfigFiles(mspConfigPath, dataConfigPath); err != nil { - return nil, fmt.Errorf("failed to write config files: %w", err) - } +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: - return &types.FabricPeerDeploymentConfig{ - BaseDeploymentConfig: types.BaseDeploymentConfig{ - Type: "fabric-peer", - Mode: p.mode, - }, - OrganizationID: p.organizationID, - MSPID: p.mspID, - SignKeyID: int64(signKeyDB.ID), - TLSKeyID: int64(tlsKeyDB.ID), - ListenAddress: p.opts.ListenAddress, - ChaincodeAddress: p.opts.ChaincodeAddress, - EventsAddress: p.opts.EventsAddress, - OperationsListenAddress: p.opts.OperationsListenAddress, - ExternalEndpoint: p.opts.ExternalEndpoint, - DomainNames: p.opts.DomainNames, - SignCert: *signKeyDB.Certificate, - TLSCert: *tlsKeyDB.Certificate, - CACert: *signCAKeyDB.Certificate, - TLSCACert: *tlsCAKeyDB.Certificate, - }, nil -} + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + id: + path: + name: -// Start starts the peer node -func (p *LocalPeer) Start() (interface{}, error) { - p.logger.Info("Starting peer", "opts", p.opts) - slugifiedID := strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-") - homeDir, err := os.UserHomeDir() - if err != nil { - return nil, fmt.Errorf("failed to get home directory: %w", err) - } + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) - dirPath := filepath.Join(homeDir, ".chainlaunch/peers", slugifiedID) - mspConfigPath := filepath.Join(dirPath, "config") - dataConfigPath := filepath.Join(dirPath, "data") 
+ pull: false - // Find peer binary - peerBinary, err := p.findPeerBinary() - if err != nil { - return nil, fmt.Errorf("failed to find peer binary: %w", err) - } + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION) - // Build command and environment - cmd := fmt.Sprintf("%s node start", peerBinary) - env := p.buildPeerEnvironment(mspConfigPath) + # whether or not golang chaincode should be linked dynamically + dynamicLink: false - p.logger.Debug("Starting peer", - "mode", p.mode, - "cmd", cmd, - "env", env, - "dirPath", dirPath, - ) + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) - switch p.mode { - case "service": - return p.startService(cmd, env, dirPath) - case "docker": - return p.startDocker(env, mspConfigPath, dataConfigPath) - default: - return nil, fmt.Errorf("invalid mode: %s", p.mode) - } -} + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION) -// buildPeerEnvironment builds the environment variables for the peer -func (p *LocalPeer) buildPeerEnvironment(mspConfigPath string) map[string]string { - env := make(map[string]string) + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + externalBuilders: + - name: ccaas_builder + path: {{.ExternalBuilderPath}} + # The maximum duration to wait for the chaincode build and install process + # to complete. 
+ installTimeout: 8m0s - // Add custom environment variables from opts - for k, v := range p.opts.Env { - env[k] = v - } + # Timeout duration for starting up a container and waiting for Register + # to come through. + startuptimeout: 5m0s - // Add required environment variables - env["CORE_PEER_MSPCONFIGPATH"] = mspConfigPath - env["FABRIC_CFG_PATH"] = mspConfigPath - env["CORE_PEER_TLS_ROOTCERT_FILE"] = filepath.Join(mspConfigPath, "tlscacerts/cacert.pem") - env["CORE_PEER_TLS_KEY_FILE"] = filepath.Join(mspConfigPath, "tls.key") - env["CORE_PEER_TLS_CLIENTCERT_FILE"] = filepath.Join(mspConfigPath, "tls.crt") - env["CORE_PEER_TLS_CLIENTKEY_FILE"] = filepath.Join(mspConfigPath, "tls.key") - env["CORE_PEER_TLS_CERT_FILE"] = filepath.Join(mspConfigPath, "tls.crt") - env["CORE_PEER_TLS_CLIENTAUTHREQUIRED"] = "false" - env["CORE_PEER_TLS_CLIENTROOTCAS_FILES"] = filepath.Join(mspConfigPath, "tlscacerts/cacert.pem") - env["CORE_PEER_ADDRESS"] = p.opts.ExternalEndpoint - env["CORE_PEER_GOSSIP_EXTERNALENDPOINT"] = p.opts.ExternalEndpoint - env["CORE_PEER_GOSSIP_ENDPOINT"] = p.opts.ExternalEndpoint - env["CORE_PEER_LISTENADDRESS"] = p.opts.ListenAddress - env["CORE_PEER_CHAINCODELISTENADDRESS"] = p.opts.ChaincodeAddress - env["CORE_PEER_EVENTS_ADDRESS"] = p.opts.EventsAddress - env["CORE_OPERATIONS_LISTENADDRESS"] = p.opts.OperationsListenAddress - env["CORE_PEER_NETWORKID"] = "peer01-nid" - env["CORE_PEER_LOCALMSPID"] = p.mspID - env["CORE_PEER_ID"] = p.opts.ID - env["CORE_OPERATIONS_TLS_ENABLED"] = "false" - env["CORE_OPERATIONS_TLS_CLIENTAUTHREQUIRED"] = "false" - env["CORE_PEER_GOSSIP_ORGLEADER"] = "true" - env["CORE_PEER_GOSSIP_BOOTSTRAP"] = p.opts.ExternalEndpoint - env["CORE_PEER_PROFILE_ENABLED"] = "true" - env["CORE_PEER_ADDRESSAUTODETECT"] = "false" - env["CORE_LOGGING_GOSSIP"] = "info" - env["FABRIC_LOGGING_SPEC"] = "info" - env["CORE_LOGGING_LEDGER"] = "info" - env["CORE_LOGGING_MSP"] = "info" - env["CORE_PEER_COMMITTER_ENABLED"] = "true" - 
env["CORE_PEER_DISCOVERY_TOUCHPERIOD"] = "60s" - env["CORE_PEER_GOSSIP_USELEADERELECTION"] = "false" - env["CORE_PEER_DISCOVERY_PERIOD"] = "60s" - env["CORE_METRICS_PROVIDER"] = "prometheus" - env["CORE_LOGGING_CAUTHDSL"] = "info" - env["CORE_LOGGING_POLICIES"] = "info" - env["CORE_LEDGER_STATE_STATEDATABASE"] = "goleveldb" - env["CORE_PEER_TLS_ENABLED"] = "true" - env["CORE_LOGGING_GRPC"] = "info" - env["CORE_LOGGING_PEER"] = "info" + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s - return env -} + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net -// startDocker starts the peer in a docker container -func (p *LocalPeer) startDocker(env map[string]string, mspConfigPath, dataConfigPath string) (*StartDockerResponse, error) { - // Convert env map to array of "-e KEY=VALUE" arguments - var envArgs []string - for k, v := range env { - envArgs = append(envArgs, "-e", fmt.Sprintf("%s=%s", k, v)) - } + # keepalive in seconds. In situations where the communication goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. 
+ # A value <= 0 turns keepalive off + keepalive: 0 - containerName, err := p.getContainerName() - if err != nil { - return nil, fmt.Errorf("failed to get container name: %w", err) - } + # enabled system chaincodes + system: + _lifecycle: enable + cscc: enable + lscc: enable + escc: enable + vscc: enable + qscc: enable - // Prepare docker run command arguments - args := []string{ - "run", - "-d", - "--name", containerName, - } - args = append(args, envArgs...) - args = append(args, - "-v", fmt.Sprintf("%s:/etc/hyperledger/fabric/msp", mspConfigPath), - "-v", fmt.Sprintf("%s:/var/hyperledger/production", dataConfigPath), - "-p", fmt.Sprintf("%s:7051", strings.Split(p.opts.ListenAddress, ":")[1]), - "-p", fmt.Sprintf("%s:7052", strings.Split(p.opts.ChaincodeAddress, ":")[1]), - "-p", fmt.Sprintf("%s:7053", strings.Split(p.opts.EventsAddress, ":")[1]), - "-p", fmt.Sprintf("%s:9443", strings.Split(p.opts.OperationsListenAddress, ":")[1]), - "hyperledger/fabric-peer:2.5.9", - "peer", - "node", - "start", - ) + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' - cmd := exec.Command("docker", args...) 
- cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("failed to start docker container: %w", err) - } + blockchain: + snapshots: + rootDir: {{.DataPath}}/snapshots - return &StartDockerResponse{ - Mode: "docker", - ContainerName: containerName, - }, nil -} + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup. + # The delay between retries doubles for each attempt. + # Default of 10 retries results in 11 attempts over 2 minutes. + maxRetriesOnStartup: 10 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. 
+ # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + # CacheSize denotes the maximum mega bytes (MB) to be allocated for the in-memory state + # cache. Note that CacheSize needs to be a multiple of 32 MB. If it is not a multiple + # of 32 MB, the peer would round the size to the next multiple of 32 MB. + # To disable the cache, 0 MB needs to be assigned to the cacheSize. + cacheSize: 64 -// Stop stops the peer node -func (p *LocalPeer) Stop() error { - p.logger.Info("Stopping peer", "opts", p.opts) + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. 
+ enableHistoryDatabase: true - switch p.mode { - case "service": - platform := runtime.GOOS - switch platform { - case "linux": - return p.stopSystemdService() - case "darwin": - return p.stopLaunchdService() - default: - return fmt.Errorf("unsupported platform for service mode: %s", platform) - } - case "docker": - return p.stopDocker() - default: - return fmt.Errorf("invalid mode: %s", p.mode) - } -} + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 -// stopDocker stops the peer docker container -func (p *LocalPeer) stopDocker() error { - containerName, err := p.getContainerName() - if err != nil { - return fmt.Errorf("failed to get container name: %w", err) - } +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 - // Stop the container - stopCmd := exec.Command("docker", "stop", containerName) - if err := stopCmd.Run(); err != nil { - return fmt.Errorf("failed to stop docker container: %w", err) - } + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false - // Remove the container - rmCmd := exec.Command("docker", "rm", "-f", containerName) - if err := rmCmd.Run(); err != nil { - p.logger.Warn("Failed to remove docker container", "error", err) - // Don't return error as the container might not exist - } + # path to PEM encoded server certificate for the operations server + cert: + file: - return nil -} + # path to PEM encoded server key for the operations server + key: + file: -// 
stopSystemdService stops the systemd service -func (p *LocalPeer) stopSystemdService() error { - serviceName := p.getServiceName() + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + clientAuthRequired: false - // Stop the service - if err := p.execSystemctl("stop", serviceName); err != nil { - return fmt.Errorf("failed to stop systemd service: %w", err) - } + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] - // Disable the service - if err := p.execSystemctl("disable", serviceName); err != nil { - p.logger.Warn("Failed to disable systemd service", "error", err) - // Don't return error as this is not critical - } +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: disabled - // Remove the service file - if err := os.Remove(p.getServiceFilePath()); err != nil { - if !os.IsNotExist(err) { - p.logger.Warn("Failed to remove service file", "error", err) - // Don't return error as this is not critical - } - } + # statsd configuration + statsd: + # network type: tcp or udp + network: udp - // Reload systemd daemon - if err := p.execSystemctl("daemon-reload"); err != nil { - p.logger.Warn("Failed to reload systemd daemon", "error", err) - // Don't return error as this is not critical - } + # statsd server address + address: 127.0.0.1:8125 - return nil -} + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s -// stopLaunchdService stops the launchd service -func (p *LocalPeer) stopLaunchdService() error { - // Stop the service - stopCmd := exec.Command("launchctl", "stop", 
p.getLaunchdServiceName()) - if err := stopCmd.Run(); err != nil { - p.logger.Warn("Failed to stop launchd service", "error", err) - // Continue anyway as we want to make sure it's unloaded - } + # prefix is prepended to all emitted statsd metrics + prefix: - // Unload the service - unloadCmd := exec.Command("launchctl", "unload", p.getLaunchdPlistPath()) - if err := unloadCmd.Run(); err != nil { - return fmt.Errorf("failed to unload launchd service: %w", err) - } +` - return nil +// LocalPeer represents a local Fabric peer node +type LocalPeer struct { + mspID string + db *db.Queries + opts StartPeerOpts + mode string + org *fabricservice.OrganizationDTO + organizationID int64 + orgService *fabricservice.OrganizationService + keyService *keymanagement.KeyManagementService + nodeID int64 + logger *logger.Logger + configService *config.ConfigService + settingsService *settingsservice.SettingsService } -// execSystemctl executes a systemctl command -func (p *LocalPeer) execSystemctl(command string, args ...string) error { - cmdArgs := append([]string{"systemctl", command}, args...) - cmd := exec.Command("sudo", cmdArgs...) 
- if err := cmd.Run(); err != nil { - return fmt.Errorf("systemctl %s failed: %w", command, err) +// NewLocalPeer creates a new LocalPeer instance +func NewLocalPeer( + mspID string, + db *db.Queries, + opts StartPeerOpts, + mode string, + org *fabricservice.OrganizationDTO, + organizationID int64, + orgService *fabricservice.OrganizationService, + keyService *keymanagement.KeyManagementService, + nodeID int64, + logger *logger.Logger, + configService *config.ConfigService, + settingsService *settingsservice.SettingsService, +) *LocalPeer { + return &LocalPeer{ + mspID: mspID, + db: db, + opts: opts, + mode: mode, + org: org, + organizationID: organizationID, + orgService: orgService, + keyService: keyService, + nodeID: nodeID, + logger: logger, + configService: configService, + settingsService: settingsService, } - return nil } -// RenewCertificates renews the peer's certificates -func (p *LocalPeer) RenewCertificates() error { - // Implementation details for certificate renewal - return nil +// getServiceName returns the systemd service name +func (p *LocalPeer) getServiceName() string { + return fmt.Sprintf("fabric-peer-%s", strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")) } -type NetworkConfigResponse struct { - NetworkConfig string +// getLaunchdServiceName returns the launchd service name +func (p *LocalPeer) getLaunchdServiceName() string { + return fmt.Sprintf("dev.chainlaunch.peer.%s.%s", + strings.ToLower(p.org.MspID), + strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")) } -type Org struct { - MSPID string - CertAuths []string - Peers []string - Orderers []string + +// getServiceFilePath returns the systemd service file path +func (p *LocalPeer) getServiceFilePath() string { + return fmt.Sprintf("/etc/systemd/system/%s.service", p.getServiceName()) +} + +// getLaunchdPlistPath returns the launchd plist file path +func (p *LocalPeer) getLaunchdPlistPath() string { + homeDir, _ := os.UserHomeDir() + return filepath.Join(homeDir, 
"Library/LaunchAgents", p.getLaunchdServiceName()+".plist") } -type Peer struct { - Name string - URL string - TLSCACert string + +// GetStdOutPath returns the path to the stdout log file +func (p *LocalPeer) GetStdOutPath() string { + dirPath := filepath.Join(p.configService.GetDataPath(), "peers", + strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")) + return filepath.Join(dirPath, p.getServiceName()+".log") } -type CA struct { - Name string - URL string - TLSCert string - EnrollID string - EnrollSecret string + +func (p *LocalPeer) getPeerPath() string { + return filepath.Join(p.configService.GetDataPath(), "peers", + strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")) } -type Orderer struct { - URL string - Name string - TLSCACert string +// getContainerName returns the docker container name +func (p *LocalPeer) getContainerName() (string, error) { + org, err := p.orgService.GetOrganization(context.Background(), p.organizationID) + if err != nil { + return "", fmt.Errorf("failed to get organization: %w", err) + } + return fmt.Sprintf("%s-%s", + strings.ToLower(org.MspID), + strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-")), nil } -const tmplGoConfig = ` -name: hlf-network -version: 1.0.0 -client: - organization: "{{ .Organization }}" -{{- if not .Organizations }} -organizations: {} -{{- else }} -organizations: - {{ range $org := .Organizations }} - {{ $org.MSPID }}: - mspid: {{ $org.MSPID }} - cryptoPath: /tmp/cryptopath - users: {} -{{- if not $org.CertAuths }} - certificateAuthorities: [] -{{- else }} - certificateAuthorities: - {{- range $ca := $org.CertAuths }} - - {{ $ca.Name }} - {{- end }} -{{- end }} -{{- if not $org.Peers }} - peers: [] -{{- else }} - peers: - {{- range $peer := $org.Peers }} - - {{ $peer }} - {{- end }} -{{- end }} -{{- if not $org.Orderers }} - orderers: [] -{{- else }} - orderers: - {{- range $orderer := $org.Orderers }} - - {{ $orderer }} - {{- end }} +// findPeerBinary finds the peer binary in PATH +func (p 
*LocalPeer) findPeerBinary() (string, error) { - {{- end }} -{{- end }} -{{- end }} + downloader, err := binaries.NewBinaryDownloader(p.configService) + if err != nil { + return "", fmt.Errorf("failed to create binary downloader: %w", err) + } -{{- if not .Orderers }} -{{- else }} -orderers: -{{- range $orderer := .Orderers }} - {{$orderer.Name}}: - url: {{ $orderer.URL }} - grpcOptions: - allow-insecure: false - tlsCACerts: - pem: | -{{ $orderer.TLSCACert | indent 8 }} -{{- end }} -{{- end }} + return downloader.GetBinaryPath(binaries.PeerBinary, p.opts.Version) +} -{{- if not .Peers }} -{{- else }} -peers: - {{- range $peer := .Peers }} - {{$peer.Name}}: - url: {{ $peer.URL }} - tlsCACerts: - pem: | -{{ $peer.TLSCACert | indent 8 }} -{{- end }} -{{- end }} +// Init initializes the peer configuration +func (p *LocalPeer) Init() (types.NodeDeploymentConfig, error) { + ctx := context.Background() + // Get node from database + node, err := p.db.GetNode(ctx, p.nodeID) + if err != nil { + return nil, fmt.Errorf("failed to get node: %w", err) + } -{{- if not .CertAuths }} -{{- else }} -certificateAuthorities: -{{- range $ca := .CertAuths }} - {{ $ca.Name }}: - url: https://{{ $ca.URL }} -{{if $ca.EnrollID }} - registrar: - enrollId: {{ $ca.EnrollID }} - enrollSecret: "{{ $ca.EnrollSecret }}" -{{ end }} - caName: {{ $ca.CAName }} - tlsCACerts: - pem: - - | -{{ $ca.TLSCert | indent 12 }} + p.logger.Info("Initializing peer", + "opts", p.opts, + "node", node, + "orgID", p.organizationID, + "nodeID", p.nodeID, + ) -{{- end }} -{{- end }} + // Get organization + org, err := p.orgService.GetOrganization(ctx, p.organizationID) + if err != nil { + return nil, fmt.Errorf("failed to get organization: %w", err) + } -channels: - _default: -{{- if not .Orderers }} - orderers: [] -{{- else }} - orderers: -{{- range $orderer := .Orderers }} - - {{$orderer.Name}} -{{- end }} -{{- end }} -{{- if not .Peers }} - peers: {} -{{- else }} - peers: -{{- range $peer := .Peers }} - 
{{$peer.Name}}: - discover: true - endorsingPeer: true - chaincodeQuery: true - ledgerQuery: true - eventSource: true -{{- end }} -{{- end }} + signCAKeyDB, err := p.keyService.GetKey(ctx, int(org.SignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to retrieve sign CA cert: %w", err) + } -` + tlsCAKeyDB, err := p.keyService.GetKey(ctx, int(org.TlsRootKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to retrieve TLS CA cert: %w", err) + } + isCA := 0 + description := "Sign key for " + p.opts.ID + curveP256 := kmodels.ECCurveP256 + providerID := 1 -func (p *LocalPeer) generateNetworkConfigForPeer( - peerUrl string, peerMspID string, peerTlsCACert string, ordererUrl string, ordererTlsCACert string) (*NetworkConfigResponse, error) { + // Create Sign Key + signKeyDB, err := p.keyService.CreateKey(ctx, kmodels.CreateKeyRequest{ + Algorithm: kmodels.KeyAlgorithmEC, + Name: p.opts.ID, + IsCA: &isCA, + Description: &description, + Curve: &curveP256, + ProviderID: &providerID, + }, int(org.SignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to create sign key: %w", err) + } - tmpl, err := template.New("networkConfig").Funcs(sprig.HermeticTxtFuncMap()).Parse(tmplGoConfig) + // Sign Sign Key + signKeyDB, err = p.keyService.SignCertificate(ctx, signKeyDB.ID, signCAKeyDB.ID, kmodels.CertificateRequest{ + CommonName: p.opts.ID, + Organization: []string{org.MspID}, + OrganizationalUnit: []string{"peer"}, + DNSNames: []string{p.opts.ID}, + IsCA: true, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to sign sign key: %w", err) } - var buf bytes.Buffer - orgs := []*Org{} - var peers []*Peer - var certAuths []*CA - var ordererNodes []*Orderer - org := &Org{ - MSPID: peerMspID, - CertAuths: []string{}, - Peers: []string{"peer0"}, - Orderers: []string{}, + signKey, err := 
p.keyService.GetDecryptedPrivateKey(int(signKeyDB.ID)) + if err != nil { + return nil, fmt.Errorf("failed to get sign private key: %w", err) } - orgs = append(orgs, org) - if peerTlsCACert != "" { - peer := &Peer{ - Name: "peer0", - URL: peerUrl, - TLSCACert: peerTlsCACert, + + // Create TLS key + tlsKeyDB, err := p.keyService.CreateKey(ctx, kmodels.CreateKeyRequest{ + Algorithm: kmodels.KeyAlgorithmEC, + Name: p.opts.ID, + IsCA: &isCA, + Description: &description, + Curve: &curveP256, + ProviderID: &providerID, + }, int(org.SignKeyID.Int64)) + if err != nil { + return nil, fmt.Errorf("failed to create sign key: %w", err) + } + domainNames := p.opts.DomainNames + + // Ensure localhost and 127.0.0.1 are included in domain names + hasLocalhost := false + hasLoopback := false + var ipAddresses []net.IP + var domains []string + for _, domain := range domainNames { + if domain == "localhost" { + hasLocalhost = true + domains = append(domains, domain) + continue + } + if domain == "127.0.0.1" { + hasLoopback = true + ipAddresses = append(ipAddresses, net.ParseIP(domain)) + continue + } + if ip := net.ParseIP(domain); ip != nil { + ipAddresses = append(ipAddresses, ip) + } else { + domains = append(domains, domain) } - peers = append(peers, peer) } - - orderer := &Orderer{ - URL: ordererUrl, - Name: "orderer0", - TLSCACert: ordererTlsCACert, + if !hasLocalhost { + domains = append(domains, "localhost") } - ordererNodes = append(ordererNodes, orderer) - err = tmpl.Execute(&buf, map[string]interface{}{ - "Peers": peers, - "Orderers": ordererNodes, - "Organizations": orgs, - "CertAuths": certAuths, - "Organization": peerMspID, - "Internal": false, - }) - if err != nil { - return nil, err + if !hasLoopback { + ipAddresses = append(ipAddresses, net.ParseIP("127.0.0.1")) } - return &NetworkConfigResponse{ - NetworkConfig: buf.String(), - }, nil -} - -// JoinChannel joins the peer to a channel -func (p *LocalPeer) JoinChannel(genesisBlock []byte) error { - p.logger.Info("Joining 
peer to channel", "peer", p.opts.ID) + p.opts.DomainNames = domains - // Create temporary file for genesis block - tmpFile, err := os.CreateTemp("", "genesis-block-*.block") + // Sign TLS certificates + validFor := kmodels.Duration(time.Hour * 24 * 365) + tlsKeyDB, err = p.keyService.SignCertificate(ctx, tlsKeyDB.ID, tlsCAKeyDB.ID, kmodels.CertificateRequest{ + CommonName: p.opts.ID, + Organization: []string{org.MspID}, + OrganizationalUnit: []string{"peer"}, + DNSNames: domains, + IPAddresses: ipAddresses, + IsCA: true, + ValidFor: validFor, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) if err != nil { - return fmt.Errorf("failed to create temp file: %w", err) + return nil, fmt.Errorf("failed to sign TLS certificate: %w", err) } - defer os.Remove(tmpFile.Name()) - - homeDir, err := os.UserHomeDir() + tlsKey, err := p.keyService.GetDecryptedPrivateKey(int(tlsKeyDB.ID)) if err != nil { - return fmt.Errorf("failed to get home directory: %w", err) + return nil, fmt.Errorf("failed to get TLS private key: %w", err) } + // Create directory structure slugifiedID := strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-") - peerConfigPath := filepath.Join(homeDir, ".chainlaunch/peers", slugifiedID, "config") + dirPath := filepath.Join(p.configService.GetDataPath(), "peers", slugifiedID) + dataConfigPath := filepath.Join(dirPath, "data") + mspConfigPath := filepath.Join(dirPath, "config") - // Write genesis block to file - if err := os.WriteFile(tmpFile.Name(), genesisBlock, 0644); err != nil { - return fmt.Errorf("failed to write genesis block: %w", err) + // Create directories + if err := os.MkdirAll(dataConfigPath, 0755); err != nil { + return nil, fmt.Errorf("failed to create data directory: %w", err) + } + if err := os.MkdirAll(mspConfigPath, 0755); err != nil { + return nil, fmt.Errorf("failed to create msp directory: %w", err) } - // Build peer channel join command - peerBinary, err := p.findPeerBinary() - if 
err != nil { - return fmt.Errorf("failed to find peer binary: %w", err) + // Write certificates and keys + if err := p.writeCertificatesAndKeys(mspConfigPath, tlsKeyDB, signKeyDB, tlsKey, signKey, signCAKeyDB, tlsCAKeyDB); err != nil { + return nil, fmt.Errorf("failed to write certificates and keys: %w", err) } - mspConfigPath, err := p.PrepareAdminCertMSP(p.org.MspID) - if err != nil { - return fmt.Errorf("failed to prepare admin cert MSP: %w", err) + + // Create external builders + if err := p.setupExternalBuilders(mspConfigPath); err != nil { + return nil, fmt.Errorf("failed to setup external builders: %w", err) } - cmd := exec.Command(peerBinary, "channel", "join", "-b", tmpFile.Name()) - listenAddress := strings.Replace(p.opts.ListenAddress, "0.0.0.0", "localhost", 1) - // Set environment variables - cmd.Env = append(os.Environ(), - fmt.Sprintf("CORE_PEER_MSPCONFIGPATH=%s", mspConfigPath), - fmt.Sprintf("CORE_PEER_ADDRESS=%s", listenAddress), - fmt.Sprintf("CORE_PEER_LOCALMSPID=%s", p.mspID), - "CORE_PEER_TLS_ENABLED=true", - fmt.Sprintf("CORE_PEER_TLS_ROOTCERT_FILE=%s", filepath.Join(mspConfigPath, "tlscacerts", "cacert.pem")), - fmt.Sprintf("FABRIC_CFG_PATH=%s", peerConfigPath), - ) - // Execute command - output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("failed to join channel: %w, output: %s", err, string(output)) + // Write config files + if err := p.writeConfigFiles(mspConfigPath, dataConfigPath); err != nil { + return nil, fmt.Errorf("failed to write config files: %w", err) } - p.logger.Info("Successfully joined channel", "peer", p.opts.ID) - return nil + return &types.FabricPeerDeploymentConfig{ + BaseDeploymentConfig: types.BaseDeploymentConfig{ + Type: "fabric-peer", + Mode: p.mode, + }, + OrganizationID: p.organizationID, + MSPID: p.mspID, + SignKeyID: int64(signKeyDB.ID), + TLSKeyID: int64(tlsKeyDB.ID), + ListenAddress: p.opts.ListenAddress, + ChaincodeAddress: p.opts.ChaincodeAddress, + EventsAddress: p.opts.EventsAddress, 
+ OperationsListenAddress: p.opts.OperationsListenAddress, + ExternalEndpoint: p.opts.ExternalEndpoint, + DomainNames: p.opts.DomainNames, + SignCert: *signKeyDB.Certificate, + TLSCert: *tlsKeyDB.Certificate, + CACert: *signCAKeyDB.Certificate, + TLSCACert: *tlsCAKeyDB.Certificate, + }, nil } -// writeCertificatesAndKeys writes the certificates and keys to the MSP directory structure -func (p *LocalPeer) writeCertificatesAndKeys( - mspConfigPath string, - tlsCert *kmodels.KeyResponse, - signCert *kmodels.KeyResponse, - tlsKey string, - signKey string, - signCACert *kmodels.KeyResponse, - tlsCACert *kmodels.KeyResponse, -) error { - // Write TLS certificates and keys - if err := os.WriteFile(filepath.Join(mspConfigPath, "tls.crt"), []byte(*tlsCert.Certificate), 0644); err != nil { - return fmt.Errorf("failed to write TLS certificate: %w", err) - } - if err := os.WriteFile(filepath.Join(mspConfigPath, "tls.key"), []byte(tlsKey), 0600); err != nil { - return fmt.Errorf("failed to write TLS key: %w", err) - } +const DefaultPeerCmdTemplate = "{{.Cmd}}" - // Create and write to signcerts directory - signcertsPath := filepath.Join(mspConfigPath, "signcerts") - if err := os.MkdirAll(signcertsPath, 0755); err != nil { - return fmt.Errorf("failed to create signcerts directory: %w", err) - } - if err := os.WriteFile(filepath.Join(signcertsPath, "cert.pem"), []byte(*signCert.Certificate), 0644); err != nil { - return fmt.Errorf("failed to write signing certificate: %w", err) - } +// Start starts the peer node +func (p *LocalPeer) Start() (interface{}, error) { + p.logger.Info("Starting peer", "opts", p.opts) + slugifiedID := strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-") - // Write root CA certificate - if err := os.WriteFile(filepath.Join(mspConfigPath, "cacert.pem"), []byte(*signCACert.Certificate), 0644); err != nil { - return fmt.Errorf("failed to write CA certificate: %w", err) - } + dirPath := filepath.Join(p.configService.GetDataPath(), "peers", slugifiedID) + 
mspConfigPath := filepath.Join(dirPath, "config") + dataConfigPath := filepath.Join(dirPath, "data") - // Create and write to cacerts directory - cacertsPath := filepath.Join(mspConfigPath, "cacerts") - if err := os.MkdirAll(cacertsPath, 0755); err != nil { - return fmt.Errorf("failed to create cacerts directory: %w", err) - } - if err := os.WriteFile(filepath.Join(cacertsPath, "cacert.pem"), []byte(*signCACert.Certificate), 0644); err != nil { - return fmt.Errorf("failed to write CA certificate to cacerts: %w", err) + // Find peer binary + peerBinary, err := p.findPeerBinary() + if err != nil { + return nil, fmt.Errorf("failed to find peer binary: %w", err) } - - // Create and write to tlscacerts directory - tlscacertsPath := filepath.Join(mspConfigPath, "tlscacerts") - if err := os.MkdirAll(tlscacertsPath, 0755); err != nil { - return fmt.Errorf("failed to create tlscacerts directory: %w", err) + setting, err := p.settingsService.GetSetting(context.Background()) + if err != nil { + return nil, fmt.Errorf("failed to get setting: %w", err) } - if err := os.WriteFile(filepath.Join(tlscacertsPath, "cacert.pem"), []byte(*tlsCACert.Certificate), 0644); err != nil { - return fmt.Errorf("failed to write TLS CA certificate: %w", err) + var peerTemplateCMD string + if setting.Config.PeerTemplateCMD == "" { + peerTemplateCMD = DefaultPeerCmdTemplate + } else { + peerTemplateCMD = setting.Config.PeerTemplateCMD } - // Create and write to keystore directory - keystorePath := filepath.Join(mspConfigPath, "keystore") - if err := os.MkdirAll(keystorePath, 0755); err != nil { - return fmt.Errorf("failed to create keystore directory: %w", err) - } - if err := os.WriteFile(filepath.Join(keystorePath, "key.pem"), []byte(signKey), 0600); err != nil { - return fmt.Errorf("failed to write signing key: %w", err) + // Parse template and build command + tmpl, err := template.New("peer").Parse(peerTemplateCMD) + if err != nil { + return nil, fmt.Errorf("failed to parse peer template: %w", 
err) } - return nil -} - -// setupExternalBuilders creates and configures the external builders for chaincode -func (p *LocalPeer) setupExternalBuilders(mspConfigPath string) error { - // Create external builder directory structure - rootExternalBuilderPath := filepath.Join(mspConfigPath, "ccaas") - binExternalBuilderPath := filepath.Join(rootExternalBuilderPath, "bin") - if err := os.MkdirAll(binExternalBuilderPath, 0755); err != nil { - return fmt.Errorf("failed to create external builder directory: %w", err) + var cmdBuf bytes.Buffer + if err := tmpl.Execute(&cmdBuf, struct{ Cmd string }{ + Cmd: fmt.Sprintf("%s node start", peerBinary), + }); err != nil { + return nil, fmt.Errorf("failed to execute peer template: %w", err) } + cmd := cmdBuf.String() + env := p.buildPeerEnvironment(mspConfigPath) - // Create build script - buildScript := `#!/bin/bash - -SOURCE=$1 -OUTPUT=$3 - -#external chaincodes expect connection.json file in the chaincode package -if [ ! -f "$SOURCE/connection.json" ]; then - >&2 echo "$SOURCE/connection.json not found" - exit 1 -fi - -#simply copy the endpoint information to specified output location -cp $SOURCE/connection.json $OUTPUT/connection.json - -if [ -d "$SOURCE/metadata" ]; then - cp -a $SOURCE/metadata $OUTPUT/metadata -fi - -exit 0` + p.logger.Debug("Starting peer", + "mode", p.mode, + "cmd", cmd, + "env", env, + "dirPath", dirPath, + ) - if err := os.WriteFile(filepath.Join(binExternalBuilderPath, "build"), []byte(buildScript), 0755); err != nil { - return fmt.Errorf("failed to write build script: %w", err) + switch p.mode { + case "service": + return p.startService(cmd, env, dirPath) + case "docker": + return p.startDocker(env, mspConfigPath, dataConfigPath) + default: + return nil, fmt.Errorf("invalid mode: %s", p.mode) } +} - // Create detect script - detectScript := `#!/bin/bash +// buildPeerEnvironment builds the environment variables for the peer +func (p *LocalPeer) buildPeerEnvironment(mspConfigPath string) 
map[string]string { + env := make(map[string]string) -METADIR=$2 -# check if the "type" field is set to "external" -# crude way without jq which is not in the default fabric peer image -TYPE=$(tr -d '\n' < "$METADIR/metadata.json" | awk -F':' '{ for (i = 1; i < NF; i++){ if ($i~/type/) { print $(i+1); break }}}'| cut -d\" -f2) + // Add custom environment variables from opts + for k, v := range p.opts.Env { + env[k] = v + } -if [ "$TYPE" = "ccaas" ]; then - exit 0 -fi + // Add required environment variables + env["CORE_PEER_MSPCONFIGPATH"] = mspConfigPath + env["FABRIC_CFG_PATH"] = mspConfigPath + env["CORE_PEER_TLS_ROOTCERT_FILE"] = filepath.Join(mspConfigPath, "tlscacerts/cacert.pem") + env["CORE_PEER_TLS_KEY_FILE"] = filepath.Join(mspConfigPath, "tls.key") + env["CORE_PEER_TLS_CLIENTCERT_FILE"] = filepath.Join(mspConfigPath, "tls.crt") + env["CORE_PEER_TLS_CLIENTKEY_FILE"] = filepath.Join(mspConfigPath, "tls.key") + env["CORE_PEER_TLS_CERT_FILE"] = filepath.Join(mspConfigPath, "tls.crt") + env["CORE_PEER_TLS_CLIENTAUTHREQUIRED"] = "false" + env["CORE_PEER_TLS_CLIENTROOTCAS_FILES"] = filepath.Join(mspConfigPath, "tlscacerts/cacert.pem") + env["CORE_PEER_ADDRESS"] = p.opts.ExternalEndpoint + env["CORE_PEER_GOSSIP_EXTERNALENDPOINT"] = p.opts.ExternalEndpoint + env["CORE_PEER_GOSSIP_ENDPOINT"] = p.opts.ExternalEndpoint + env["CORE_PEER_LISTENADDRESS"] = p.opts.ListenAddress + env["CORE_PEER_CHAINCODELISTENADDRESS"] = p.opts.ChaincodeAddress + env["CORE_PEER_EVENTS_ADDRESS"] = p.opts.EventsAddress + env["CORE_OPERATIONS_LISTENADDRESS"] = p.opts.OperationsListenAddress + env["CORE_PEER_NETWORKID"] = "peer01-nid" + env["CORE_PEER_LOCALMSPID"] = p.mspID + env["CORE_PEER_ID"] = p.opts.ID + env["CORE_OPERATIONS_TLS_ENABLED"] = "false" + env["CORE_OPERATIONS_TLS_CLIENTAUTHREQUIRED"] = "false" + env["CORE_PEER_GOSSIP_ORGLEADER"] = "true" + env["CORE_PEER_GOSSIP_BOOTSTRAP"] = p.opts.ExternalEndpoint + env["CORE_PEER_PROFILE_ENABLED"] = "false" + 
env["CORE_PEER_ADDRESSAUTODETECT"] = "false" + env["CORE_LOGGING_GOSSIP"] = "info" + env["FABRIC_LOGGING_SPEC"] = "info" + env["CORE_LOGGING_LEDGER"] = "info" + env["CORE_LOGGING_MSP"] = "info" + env["CORE_PEER_COMMITTER_ENABLED"] = "true" + env["CORE_PEER_DISCOVERY_TOUCHPERIOD"] = "60s" + env["CORE_PEER_GOSSIP_USELEADERELECTION"] = "false" + env["CORE_PEER_DISCOVERY_PERIOD"] = "60s" + env["CORE_METRICS_PROVIDER"] = "prometheus" + env["CORE_LOGGING_CAUTHDSL"] = "info" + env["CORE_LOGGING_POLICIES"] = "info" + env["CORE_LEDGER_STATE_STATEDATABASE"] = "goleveldb" + env["CORE_PEER_TLS_ENABLED"] = "true" + env["CORE_LOGGING_GRPC"] = "info" + env["CORE_LOGGING_PEER"] = "info" -exit 1` + return env +} - if err := os.WriteFile(filepath.Join(binExternalBuilderPath, "detect"), []byte(detectScript), 0755); err != nil { - return fmt.Errorf("failed to write detect script: %w", err) +// startDocker starts the peer in a docker container +func (p *LocalPeer) startDocker(env map[string]string, mspConfigPath, dataConfigPath string) (*StartDockerResponse, error) { + // Convert env map to array of "-e KEY=VALUE" arguments + var envArgs []string + for k, v := range env { + envArgs = append(envArgs, "-e", fmt.Sprintf("%s=%s", k, v)) } - // Create release script - releaseScript := `#!/bin/bash - -BLD="$1" -RELEASE="$2" - -if [ -d "$BLD/metadata" ]; then - cp -a "$BLD/metadata/"* "$RELEASE/" -fi + containerName, err := p.getContainerName() + if err != nil { + return nil, fmt.Errorf("failed to get container name: %w", err) + } -#external chaincodes expect artifacts to be placed under "$RELEASE"/chaincode/server -if [ -f $BLD/connection.json ]; then - mkdir -p "$RELEASE"/chaincode/server - cp $BLD/connection.json "$RELEASE"/chaincode/server + // Prepare docker run command arguments + args := []string{ + "run", + "-d", + "--name", containerName, + } + args = append(args, envArgs...) 
+ args = append(args, + "-v", fmt.Sprintf("%s:/etc/hyperledger/fabric/msp", mspConfigPath), + "-v", fmt.Sprintf("%s:/var/hyperledger/production", dataConfigPath), + "-p", fmt.Sprintf("%s:7051", strings.Split(p.opts.ListenAddress, ":")[1]), + "-p", fmt.Sprintf("%s:7052", strings.Split(p.opts.ChaincodeAddress, ":")[1]), + "-p", fmt.Sprintf("%s:7053", strings.Split(p.opts.EventsAddress, ":")[1]), + "-p", fmt.Sprintf("%s:9443", strings.Split(p.opts.OperationsListenAddress, ":")[1]), + "hyperledger/fabric-peer:2.5.9", + "peer", + "node", + "start", + ) - #if tls_required is true, copy TLS files (using above example, the fully qualified path for these fils would be "$RELEASE"/chaincode/server/tls) + cmd := exec.Command("docker", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr - exit 0 -fi + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("failed to start docker container: %w", err) + } -exit 1` + return &StartDockerResponse{ + Mode: "docker", + ContainerName: containerName, + }, nil +} - if err := os.WriteFile(filepath.Join(binExternalBuilderPath, "release"), []byte(releaseScript), 0755); err != nil { - return fmt.Errorf("failed to write release script: %w", err) +// Stop stops the peer node +func (p *LocalPeer) Stop() error { + if p.mode == "service" { + var cmd *exec.Cmd + if runtime.GOOS == "darwin" { + cmd = exec.Command("launchctl", "unload", p.getLaunchdPlistPath()) + } else { + cmd = exec.Command("systemctl", "stop", p.getServiceName()) + } + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to stop service: %w: %s", err, output) + } + return nil } + p.logger.Info("Stopping peer", "opts", p.opts) - return nil + switch p.mode { + case "service": + platform := runtime.GOOS + switch platform { + case "linux": + return p.stopSystemdService() + case "darwin": + return p.stopLaunchdService() + default: + return fmt.Errorf("unsupported platform for service mode: %s", platform) + } + case "docker": + return 
p.stopDocker() + default: + return fmt.Errorf("invalid mode: %s", p.mode) + } } -const configYamlContent = `NodeOUs: - Enable: true - ClientOUIdentifier: - Certificate: cacerts/cacert.pem - OrganizationalUnitIdentifier: client - PeerOUIdentifier: - Certificate: cacerts/cacert.pem - OrganizationalUnitIdentifier: peer - AdminOUIdentifier: - Certificate: cacerts/cacert.pem - OrganizationalUnitIdentifier: admin - OrdererOUIdentifier: - Certificate: cacerts/cacert.pem - OrganizationalUnitIdentifier: orderer -` - -// writeConfigFiles writes the config.yaml and core.yaml files -func (p *LocalPeer) writeConfigFiles(mspConfigPath, dataConfigPath string) error { - // Write config.yaml - if err := os.WriteFile(filepath.Join(mspConfigPath, "config.yaml"), []byte(configYamlContent), 0644); err != nil { - return fmt.Errorf("failed to write config.yaml: %w", err) +// stopDocker stops the peer docker container +func (p *LocalPeer) stopDocker() error { + containerName, err := p.getContainerName() + if err != nil { + return fmt.Errorf("failed to get container name: %w", err) } - // Define template data - data := struct { - PeerID string - ListenAddress string - ChaincodeAddress string - ExternalEndpoint string - DataPath string - MSPID string - ExternalBuilderPath string - OperationsListenAddress string - }{ - PeerID: p.opts.ID, - ListenAddress: p.opts.ListenAddress, - ChaincodeAddress: p.opts.ChaincodeAddress, - ExternalEndpoint: p.opts.ExternalEndpoint, - DataPath: dataConfigPath, - MSPID: p.mspID, - ExternalBuilderPath: filepath.Join(mspConfigPath, "ccaas"), - OperationsListenAddress: p.opts.OperationsListenAddress, + // Stop the container + stopCmd := exec.Command("docker", "stop", containerName) + if err := stopCmd.Run(); err != nil { + return fmt.Errorf("failed to stop docker container: %w", err) } - const coreYamlTemplate = ` -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# + // Remove the container + rmCmd := exec.Command("docker", "rm", "-f", containerName) + if err := rmCmd.Run(); err != nil { + p.logger.Warn("Failed to remove docker container", "error", err) + // Don't return error as the container might not exist + } -############################################################################### -# -# Peer section -# -############################################################################### -peer: + return nil +} - # The peer id provides a name for this peer instance and is used when - # naming docker resources. - id: jdoe +// stopSystemdService stops the systemd service +func (p *LocalPeer) stopSystemdService() error { + serviceName := p.getServiceName() - # The networkId allows for logical separation of networks and is used when - # naming docker resources. - networkId: dev + // Stop the service + if err := p.execSystemctl("stop", serviceName); err != nil { + return fmt.Errorf("failed to stop systemd service: %w", err) + } - # The Address at local network interface this Peer will listen on. - # By default, it will listen on all network interfaces - listenAddress: 0.0.0.0:7051 + // Disable the service + if err := p.execSystemctl("disable", serviceName); err != nil { + p.logger.Warn("Failed to disable systemd service", "error", err) + // Don't return error as this is not critical + } - # The endpoint this peer uses to listen for inbound chaincode connections. - # If this is commented-out, the listen address is selected to be - # the peer's address (see below) with port 7052 - # chaincodeListenAddress: 0.0.0.0:7052 + // Remove the service file + if err := os.Remove(p.getServiceFilePath()); err != nil { + if !os.IsNotExist(err) { + p.logger.Warn("Failed to remove service file", "error", err) + // Don't return error as this is not critical + } + } - # The endpoint the chaincode for this peer uses to connect to the peer. 
- # If this is not specified, the chaincodeListenAddress address is selected. - # And if chaincodeListenAddress is not specified, address is selected from - # peer address (see below). If specified peer address is invalid then it - # will fallback to the auto detected IP (local IP) regardless of the peer - # addressAutoDetect value. - # chaincodeAddress: 0.0.0.0:7052 + // Reload systemd daemon + if err := p.execSystemctl("daemon-reload"); err != nil { + p.logger.Warn("Failed to reload systemd daemon", "error", err) + // Don't return error as this is not critical + } - # When used as peer config, this represents the endpoint to other peers - # in the same organization. For peers in other organization, see - # gossip.externalEndpoint for more info. - # When used as CLI config, this means the peer's endpoint to interact with - address: 0.0.0.0:7051 + return nil +} - # Whether the Peer should programmatically determine its address - # This case is useful for docker containers. - # When set to true, will override peer address. - addressAutoDetect: false +// stopLaunchdService stops the launchd service +func (p *LocalPeer) stopLaunchdService() error { + // Stop the service + stopCmd := exec.Command("launchctl", "stop", p.getLaunchdServiceName()) + if err := stopCmd.Run(); err != nil { + p.logger.Warn("Failed to stop launchd service", "error", err) + // Continue anyway as we want to make sure it's unloaded + } - # Keepalive settings for peer server and clients - keepalive: - # Interval is the duration after which if the server does not see - # any activity from the client it pings the client to see if it's alive - interval: 7200s - # Timeout is the duration the server waits for a response - # from the client after sending a ping before closing the connection - timeout: 20s - # MinInterval is the minimum permitted time between client pings. 
- # If clients send pings more frequently, the peer server will - # disconnect them - minInterval: 60s - # Client keepalive settings for communicating with other peer nodes - client: - # Interval is the time between pings to peer nodes. This must - # greater than or equal to the minInterval specified by peer - # nodes - interval: 60s - # Timeout is the duration the client waits for a response from - # peer nodes before closing the connection - timeout: 20s - # DeliveryClient keepalive settings for communication with ordering - # nodes. - deliveryClient: - # Interval is the time between pings to ordering nodes. This must - # greater than or equal to the minInterval specified by ordering - # nodes. - interval: 60s - # Timeout is the duration the client waits for a response from - # ordering nodes before closing the connection - timeout: 20s + // Unload the service + unloadCmd := exec.Command("launchctl", "unload", p.getLaunchdPlistPath()) + if err := unloadCmd.Run(); err != nil { + return fmt.Errorf("failed to unload launchd service: %w", err) + } + return nil +} - # Gossip related configuration - gossip: - # Bootstrap set to initialize gossip with. - # This is a list of other peers that this peer reaches out to at startup. - # Important: The endpoints here have to be endpoints of peers in the same - # organization, because the peer would refuse connecting to these endpoints - # unless they are in the same organization as the peer. - bootstrap: 127.0.0.1:7051 +// execSystemctl executes a systemctl command +func (p *LocalPeer) execSystemctl(command string, args ...string) error { + cmdArgs := append([]string{command}, args...) + + // Check if sudo is available + sudoPath, err := exec.LookPath("sudo") + if err == nil { + // sudo is available, use it + cmdArgs = append([]string{"systemctl"}, cmdArgs...) + cmd := exec.Command(sudoPath, cmdArgs...) 
+ if err := cmd.Run(); err != nil { + return fmt.Errorf("systemctl %s failed: %w", command, err) + } + } else { + // sudo is not available, run directly + cmd := exec.Command("systemctl", cmdArgs...) + if err := cmd.Run(); err != nil { + return fmt.Errorf("systemctl %s failed: %w", command, err) + } + } - # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. - # Setting both to true would result in the termination of the peer - # since this is undefined state. If the peers are configured with - # useLeaderElection=false, make sure there is at least 1 peer in the - # organization that its orgLeader is set to true. + return nil +} - # Defines whenever peer will initialize dynamic algorithm for - # "leader" selection, where leader is the peer to establish - # connection with ordering service and use delivery protocol - # to pull ledger blocks from ordering service. - useLeaderElection: false - # Statically defines peer to be an organization "leader", - # where this means that current peer will maintain connection - # with ordering service and disseminate block across peers in - # its own organization. Multiple peers or all peers in an organization - # may be configured as org leaders, so that they all pull - # blocks directly from ordering service. - orgLeader: true +// RenewCertificates renews the peer's TLS and signing certificates +func (p *LocalPeer) RenewCertificates(peerDeploymentConfig *types.FabricPeerDeploymentConfig) error { - # Interval for membershipTracker polling - membershipTrackerInterval: 5s + ctx := context.Background() + p.logger.Info("Starting certificate renewal for peer", "peerID", p.opts.ID) - # Overrides the endpoint that the peer publishes to peers - # in its organization. 
For peers in foreign organizations - # see 'externalEndpoint' - endpoint: - # Maximum count of blocks stored in memory - maxBlockCountToStore: 10 - # Max time between consecutive message pushes(unit: millisecond) - maxPropagationBurstLatency: 10ms - # Max number of messages stored until a push is triggered to remote peers - maxPropagationBurstSize: 10 - # Number of times a message is pushed to remote peers - propagateIterations: 1 - # Number of peers selected to push messages to - propagatePeerNum: 3 - # Determines frequency of pull phases(unit: second) - # Must be greater than digestWaitTime + responseWaitTime - pullInterval: 4s - # Number of peers to pull from - pullPeerNum: 3 - # Determines frequency of pulling state info messages from peers(unit: second) - requestStateInfoInterval: 4s - # Determines frequency of pushing state info messages to peers(unit: second) - publishStateInfoInterval: 4s - # Maximum time a stateInfo message is kept until expired - stateInfoRetentionInterval: - # Time from startup certificates are included in Alive messages(unit: second) - publishCertPeriod: 10s - # Should we skip verifying block messages or not (currently not in use) - skipBlockVerification: false - # Dial timeout(unit: second) - dialTimeout: 3s - # Connection timeout(unit: second) - connTimeout: 2s - # Buffer size of received messages - recvBuffSize: 20 - # Buffer size of sending messages - sendBuffSize: 200 - # Time to wait before pull engine processes incoming digests (unit: second) - # Should be slightly smaller than requestWaitTime - digestWaitTime: 1s - # Time to wait before pull engine removes incoming nonce (unit: milliseconds) - # Should be slightly bigger than digestWaitTime - requestWaitTime: 1500ms - # Time to wait before pull engine ends pull (unit: second) - responseWaitTime: 2s - # Alive check interval(unit: second) - aliveTimeInterval: 5s - # Alive expiration timeout(unit: second) - aliveExpirationTimeout: 25s - # Reconnect interval(unit: second) - 
reconnectInterval: 25s - # Max number of attempts to connect to a peer - maxConnectionAttempts: 120 - # Message expiration factor for alive messages - msgExpirationFactor: 20 - # This is an endpoint that is published to peers outside of the organization. - # If this isn't set, the peer will not be known to other organizations. - externalEndpoint: - # Leader election service configuration - election: - # Longest time peer waits for stable membership during leader election startup (unit: second) - startupGracePeriod: 15s - # Interval gossip membership samples to check its stability (unit: second) - membershipSampleInterval: 1s - # Time passes since last declaration message before peer decides to perform leader election (unit: second) - leaderAliveThreshold: 10s - # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) - leaderElectionDuration: 5s + // Get organization details + org, err := p.orgService.GetOrganization(ctx, p.organizationID) + if err != nil { + return fmt.Errorf("failed to get organization: %w", err) + } - pvtData: - # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block - # would be attempted to be pulled from peers until the block would be committed without the private data - pullRetryThreshold: 60s - # As private data enters the transient store, it is associated with the peer's ledger's height at that time. - # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, - # and the private data residing inside the transient store that is guaranteed not to be purged. - # Private data is purged from the transient store when blocks with sequences that are multiples - # of transientstoreMaxBlockRetention are committed. - transientstoreMaxBlockRetention: 1000 - # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer - # at private data push at endorsement time. 
- pushAckTimeout: 3s - # Block to live pulling margin, used as a buffer - # to prevent peer from trying to pull private data - # from peers that is soon to be purged in next N blocks. - # This helps a newly joined peer catch up to current - # blockchain height quicker. - btlPullMargin: 10 - # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to - # pull from the other peers the most recent missing blocks with a maximum batch size limitation. - # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a - # single iteration. - reconcileBatchSize: 10 - # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning - # of the next reconciliation iteration. - reconcileSleepInterval: 1m - # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. - reconciliationEnabled: true - # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid - # transaction's private data from other peers need to be skipped during the commit time and pulled - # only through reconciler. - skipPullingInvalidTransactionsDuringCommit: false - # implicitCollectionDisseminationPolicy specifies the dissemination policy for the peer's own implicit collection. - # When a peer endorses a proposal that writes to its own implicit collection, below values override the default values - # for disseminating private data. - # Note that it is applicable to all channels the peer has joined. The implication is that requiredPeerCount has to - # be smaller than the number of peers in a channel that has the lowest numbers of peers from the organization. - implicitCollectionDisseminationPolicy: - # requiredPeerCount defines the minimum number of eligible peers to which the peer must successfully - # disseminate private data for its own implicit collection during endorsement. Default value is 0. 
- requiredPeerCount: 0 - # maxPeerCount defines the maximum number of eligible peers to which the peer will attempt to - # disseminate private data for its own implicit collection during endorsement. Default value is 1. - maxPeerCount: 1 + if peerDeploymentConfig.SignKeyID == 0 || peerDeploymentConfig.TLSKeyID == 0 { + return fmt.Errorf("peer node does not have required key IDs") + } - # Gossip state transfer related configuration - state: - # indicates whenever state transfer is enabled or not - # default value is true, i.e. state transfer is active - # and takes care to sync up missing blocks allowing - # lagging peer to catch up to speed with rest network - enabled: false - # checkInterval interval to check whether peer is lagging behind enough to - # request blocks via state transfer from another peer. - checkInterval: 10s - # responseTimeout amount of time to wait for state transfer response from - # other peers - responseTimeout: 3s - # batchSize the number of blocks to request via state transfer from another peer - batchSize: 10 - # blockBufferSize reflects the size of the re-ordering buffer - # which captures blocks and takes care to deliver them in order - # down to the ledger layer. The actual buffer size is bounded between - # 0 and 2*blockBufferSize, each channel maintains its own buffer - blockBufferSize: 20 - # maxRetries maximum number of re-tries to ask - # for single state transfer request - maxRetries: 3 + // Get the CA certificates + signCAKey, err := p.keyService.GetKey(ctx, int(org.SignKeyID.Int64)) + if err != nil { + return fmt.Errorf("failed to get sign CA key: %w", err) + } - # TLS Settings - tls: - # Require server-side TLS - enabled: false - # Require client certificates / mutual TLS. - # Note that clients that are not configured to use a certificate will - # fail to connect to the peer. 
- clientAuthRequired: false - # X.509 certificate used for TLS server - cert: - file: tls/server.crt - # Private key used for TLS server (and client if clientAuthEnabled - # is set to true - key: - file: tls/server.key - # Trusted root certificate chain for tls.cert - rootcert: - file: tls/ca.crt - # Set of root certificate authorities used to verify client certificates - clientRootCAs: - files: - - tls/ca.crt - # Private key used for TLS when making client connections. If - # not set, peer.tls.key.file will be used instead - clientKey: - file: - # X.509 certificate used for TLS when making client connections. - # If not set, peer.tls.cert.file will be used instead - clientCert: - file: + tlsCAKey, err := p.keyService.GetKey(ctx, int(org.TlsRootKeyID.Int64)) + if err != nil { + return fmt.Errorf("failed to get TLS CA key: %w", err) + } + // In case the sign key is not signed by the CA, set the signing key ID to the CA key ID + signKeyDB, err := p.keyService.GetKey(ctx, int(peerDeploymentConfig.SignKeyID)) + if err != nil { + return fmt.Errorf("failed to get sign private key: %w", err) + } + if signKeyDB.SigningKeyID == nil || *signKeyDB.SigningKeyID == 0 { + // Set the signing key ID to the organization's sign CA key ID + err = p.keyService.SetSigningKeyIDForKey(ctx, int(peerDeploymentConfig.SignKeyID), int(signCAKey.ID)) + if err != nil { + return fmt.Errorf("failed to set signing key ID for sign key: %w", err) + } + } + + tlsKeyDB, err := p.keyService.GetKey(ctx, int(peerDeploymentConfig.TLSKeyID)) + if err != nil { + return fmt.Errorf("failed to get TLS private key: %w", err) + } + + if tlsKeyDB.SigningKeyID == nil || *tlsKeyDB.SigningKeyID == 0 { + // Set the signing key ID to the organization's sign CA key ID + err = p.keyService.SetSigningKeyIDForKey(ctx, int(peerDeploymentConfig.TLSKeyID), int(tlsCAKey.ID)) + if err != nil { + return fmt.Errorf("failed to set signing key ID for TLS key: %w", err) + } + } + // Renew signing certificate + validFor := 
kmodels.Duration(time.Hour * 24 * 365) // 1 year validity + _, err = p.keyService.RenewCertificate(ctx, int(peerDeploymentConfig.SignKeyID), kmodels.CertificateRequest{ + CommonName: p.opts.ID, + Organization: []string{org.MspID}, + OrganizationalUnit: []string{"peer"}, + DNSNames: []string{p.opts.ID}, + IsCA: false, + ValidFor: validFor, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) + if err != nil { + return fmt.Errorf("failed to renew signing certificate: %w", err) + } + + // Renew TLS certificate + domainNames := p.opts.DomainNames + var ipAddresses []net.IP + var domains []string + + // Ensure localhost and 127.0.0.1 are included + hasLocalhost := false + hasLoopback := false + for _, domain := range domainNames { + if domain == "localhost" { + hasLocalhost = true + domains = append(domains, domain) + continue + } + if domain == "127.0.0.1" { + hasLoopback = true + ipAddresses = append(ipAddresses, net.ParseIP(domain)) + continue + } + if ip := net.ParseIP(domain); ip != nil { + ipAddresses = append(ipAddresses, ip) + } else { + domains = append(domains, domain) + } + } + if !hasLocalhost { + domains = append(domains, "localhost") + } + if !hasLoopback { + ipAddresses = append(ipAddresses, net.ParseIP("127.0.0.1")) + } - # Authentication contains configuration parameters related to authenticating - # client messages - authentication: - # the acceptable difference between the current server time and the - # client's time as specified in a client request message - timewindow: 15m + _, err = p.keyService.RenewCertificate(ctx, int(peerDeploymentConfig.TLSKeyID), kmodels.CertificateRequest{ + CommonName: p.opts.ID, + Organization: []string{org.MspID}, + OrganizationalUnit: []string{"peer"}, + DNSNames: domains, + IPAddresses: ipAddresses, + IsCA: false, + ValidFor: validFor, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) + if err != nil { + return 
fmt.Errorf("failed to renew TLS certificate: %w", err) + } - # Path on the file system where peer will store data (eg ledger). This - # location must be access control protected to prevent unintended - # modification that might corrupt the peer operations. - fileSystemPath: {{.DataPath}} + // Get the private keys + signKey, err := p.keyService.GetDecryptedPrivateKey(int(peerDeploymentConfig.SignKeyID)) + if err != nil { + return fmt.Errorf("failed to get sign private key: %w", err) + } - # BCCSP (Blockchain crypto provider): Select which crypto implementation or - # library to use - BCCSP: - Default: SW - # Settings for the SW crypto provider (i.e. when DEFAULT: SW) - SW: - # TODO: The default Hash and Security level needs refactoring to be - # fully configurable. Changing these defaults requires coordination - # SHA2 is hardcoded in several places, not only BCCSP - Hash: SHA2 - Security: 256 - # Location of Key Store - FileKeyStore: - # If "", defaults to 'mspConfigPath'/keystore - KeyStore: - # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) - PKCS11: - # Location of the PKCS11 module library - Library: - # Token Label - Label: - # User PIN - Pin: - Hash: - Security: + tlsKey, err := p.keyService.GetDecryptedPrivateKey(int(peerDeploymentConfig.TLSKeyID)) + if err != nil { + return fmt.Errorf("failed to get TLS private key: %w", err) + } - # Path on the file system where peer will find MSP local configurations - mspConfigPath: msp + // Update the certificates in the MSP directory + peerPath := p.getPeerPath() + mspConfigPath := filepath.Join(peerPath, "config") + + err = p.writeCertificatesAndKeys( + mspConfigPath, + tlsKeyDB, + signKeyDB, + tlsKey, + signKey, + signCAKey, + tlsCAKey, + ) + if err != nil { + return fmt.Errorf("failed to write renewed certificates: %w", err) + } - # Identifier of the local MSP - # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- - # Deployers need to change the value of the localMspId string. 
- # In particular, the name of the local MSP ID of a peer needs - # to match the name of one of the MSPs in each of the channel - # that this peer is a member of. Otherwise this peer's messages - # will not be identified as valid by other nodes. - localMspId: SampleOrg + // Restart the peer + _, err = p.Start() + if err != nil { + return fmt.Errorf("failed to restart peer after certificate renewal: %w", err) + } - # CLI common client config options - client: - # connection timeout - connTimeout: 3s + p.logger.Info("Successfully renewed peer certificates", "peerID", p.opts.ID) + p.logger.Info("Restarting peer after certificate renewal") + // Stop the peer before renewing certificates + if err := p.Stop(); err != nil { + return fmt.Errorf("failed to stop peer before certificate renewal: %w", err) + } + p.logger.Info("Successfully stopped peer before certificate renewal") + p.logger.Info("Starting peer after certificate renewal") + _, err = p.Start() + if err != nil { + return fmt.Errorf("failed to start peer after certificate renewal: %w", err) + } + p.logger.Info("Successfully started peer after certificate renewal") + return nil +} - # Delivery service related config - deliveryclient: - # It sets the total time the delivery service may spend in reconnection - # attempts until its retry logic gives up and returns an error - reconnectTotalTimeThreshold: 3600s +type NetworkConfigResponse struct { + NetworkConfig string +} +type Org struct { + MSPID string + CertAuths []string + Peers []string + Orderers []string +} +type Peer struct { + Name string + URL string + TLSCACert string +} +type CA struct { + Name string + URL string + TLSCert string + EnrollID string + EnrollSecret string +} - # It sets the delivery service <-> ordering service node connection timeout - connTimeout: 3s +type Orderer struct { + URL string + Name string + TLSCACert string +} - # It sets the delivery service maximal delay between consecutive retries - reConnectBackoffThreshold: 3600s +const 
tmplGoConfig = ` +name: hlf-network +version: 1.0.0 +client: + organization: "{{ .Organization }}" +{{- if not .Organizations }} +organizations: {} +{{- else }} +organizations: + {{ range $org := .Organizations }} + {{ $org.MSPID }}: + mspid: {{ $org.MSPID }} + cryptoPath: /tmp/cryptopath + users: {} +{{- if not $org.CertAuths }} + certificateAuthorities: [] +{{- else }} + certificateAuthorities: + {{- range $ca := $org.CertAuths }} + - {{ $ca.Name }} + {{- end }} +{{- end }} +{{- if not $org.Peers }} + peers: [] +{{- else }} + peers: + {{- range $peer := $org.Peers }} + - {{ $peer }} + {{- end }} +{{- end }} +{{- if not $org.Orderers }} + orderers: [] +{{- else }} + orderers: + {{- range $orderer := $org.Orderers }} + - {{ $orderer }} + {{- end }} - # A list of orderer endpoint addresses which should be overridden - # when found in channel configurations. - addressOverrides: - # - from: - # to: - # caCertsFile: - # - from: - # to: - # caCertsFile: + {{- end }} +{{- end }} +{{- end }} - # Type for the local MSP - by default it's of type bccsp - localMspType: bccsp +{{- if not .Orderers }} +{{- else }} +orderers: +{{- range $orderer := .Orderers }} + {{$orderer.Name}}: + url: {{ $orderer.URL }} + grpcOptions: + allow-insecure: false + tlsCACerts: + pem: | +{{ $orderer.TLSCACert | indent 8 }} +{{- end }} +{{- end }} - # Used with Go profiling tools only in none production environment. 
In - # production, it should be disabled (eg enabled: false) - profile: - enabled: false - listenAddress: 0.0.0.0:6060 +{{- if not .Peers }} +{{- else }} +peers: + {{- range $peer := .Peers }} + {{$peer.Name}}: + url: {{ $peer.URL }} + tlsCACerts: + pem: | +{{ $peer.TLSCACert | indent 8 }} +{{- end }} +{{- end }} - # Handlers defines custom handlers that can filter and mutate - # objects passing within the peer, such as: - # Auth filter - reject or forward proposals from clients - # Decorators - append or mutate the chaincode input passed to the chaincode - # Endorsers - Custom signing over proposal response payload and its mutation - # Valid handler definition contains: - # - A name which is a factory method name defined in - # core/handlers/library/library.go for statically compiled handlers - # - library path to shared object binary for pluggable filters - # Auth filters and decorators are chained and executed in the order that - # they are defined. For example: - # authFilters: - # - - # name: FilterOne - # library: /opt/lib/filter.so - # - - # name: FilterTwo - # decorators: - # - - # name: DecoratorOne - # - - # name: DecoratorTwo - # library: /opt/lib/decorator.so - # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. - # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality - # as the default ESCC. - # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar - # to auth filters and decorators. 
- # endorsers: - # escc: - # name: DefaultESCC - # library: /etc/hyperledger/fabric/plugin/escc.so - handlers: - authFilters: - - - name: DefaultAuth - - - name: ExpirationCheck # This filter checks identity x509 certificate expiration - decorators: - - - name: DefaultDecorator - endorsers: - escc: - name: DefaultEndorsement - library: - validators: - vscc: - name: DefaultValidation - library: +{{- if not .CertAuths }} +{{- else }} +certificateAuthorities: +{{- range $ca := .CertAuths }} + {{ $ca.Name }}: + url: https://{{ $ca.URL }} +{{if $ca.EnrollID }} + registrar: + enrollId: {{ $ca.EnrollID }} + enrollSecret: "{{ $ca.EnrollSecret }}" +{{ end }} + caName: {{ $ca.CAName }} + tlsCACerts: + pem: + - | +{{ $ca.TLSCert | indent 12 }} - # library: /etc/hyperledger/fabric/plugin/escc.so - # Number of goroutines that will execute transaction validation in parallel. - # By default, the peer chooses the number of CPUs on the machine. Set this - # variable to override that choice. - # NOTE: overriding this value might negatively influence the performance of - # the peer so please change this value only if you know what you're doing - validatorPoolSize: +{{- end }} +{{- end }} - # The discovery service is used by clients to query information about peers, - # such as - which peers have joined a certain channel, what is the latest - # channel config, and most importantly - given a chaincode and a channel, - # what possible sets of peers satisfy the endorsement policy. - discovery: - enabled: true - # Whether the authentication cache is enabled or not. - authCacheEnabled: true - # The maximum size of the cache, after which a purge takes place - authCacheMaxSize: 1000 - # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation - authCachePurgeRetentionRatio: 0.75 - # Whether to allow non-admins to perform non channel scoped queries. - # When this is false, it means that only peer admins can perform non channel scoped queries. 
- orgMembersAllowedAccess: false +channels: + _default: +{{- if not .Orderers }} + orderers: [] +{{- else }} + orderers: +{{- range $orderer := .Orderers }} + - {{$orderer.Name}} +{{- end }} +{{- end }} +{{- if not .Peers }} + peers: {} +{{- else }} + peers: +{{- range $peer := .Peers }} + {{$peer.Name}}: + discover: true + endorsingPeer: true + chaincodeQuery: true + ledgerQuery: true + eventSource: true +{{- end }} +{{- end }} - # Limits is used to configure some internal resource limits. - limits: - # Concurrency limits the number of concurrently running requests to a service on each peer. - # Currently this option is only applied to endorser service and deliver service. - # When the property is missing or the value is 0, the concurrency limit is disabled for the service. - concurrency: - # endorserService limits concurrent requests to endorser service that handles chaincode deployment, query and invocation, - # including both user chaincodes and system chaincodes. - endorserService: 2500 - # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. - deliverService: 2500 +` -############################################################################### -# -# VM section -# -############################################################################### -vm: +func (p *LocalPeer) generateNetworkConfigForPeer( + peerUrl string, peerMspID string, peerTlsCACert string, ordererUrl string, ordererTlsCACert string) (*NetworkConfigResponse, error) { - # Endpoint of the vm management system. 
For docker can be one of the following in general - # unix:///var/run/docker.sock - # http://localhost:2375 - # https://localhost:2376 - endpoint: "" + tmpl, err := template.New("networkConfig").Funcs(sprig.HermeticTxtFuncMap()).Parse(tmplGoConfig) + if err != nil { + return nil, err + } + var buf bytes.Buffer + orgs := []*Org{} + var peers []*Peer + var certAuths []*CA + var ordererNodes []*Orderer - # settings for docker vms - docker: - tls: - enabled: false - ca: - file: docker/ca.crt - cert: - file: docker/tls.crt - key: - file: docker/tls.key + org := &Org{ + MSPID: peerMspID, + CertAuths: []string{}, + Peers: []string{}, + Orderers: []string{}, + } + orgs = append(orgs, org) + if peerTlsCACert != "" { + peer := &Peer{ + Name: "peer0", + URL: peerUrl, + TLSCACert: peerTlsCACert, + } + org.Peers = append(org.Peers, "peer0") + peers = append(peers, peer) + } + if ordererTlsCACert != "" && ordererUrl != "" { + orderer := &Orderer{ + URL: ordererUrl, + Name: "orderer0", + TLSCACert: ordererTlsCACert, + } + ordererNodes = append(ordererNodes, orderer) + } + err = tmpl.Execute(&buf, map[string]interface{}{ + "Peers": peers, + "Orderers": ordererNodes, + "Organizations": orgs, + "CertAuths": certAuths, + "Organization": peerMspID, + "Internal": false, + }) + if err != nil { + return nil, err + } + p.logger.Debugf("Network config: %s", buf.String()) + return &NetworkConfigResponse{ + NetworkConfig: buf.String(), + }, nil +} - # Enables/disables the standard out/err from chaincode containers for - # debugging purposes - attachStdout: false +// JoinChannel joins the peer to a channel +func (p *LocalPeer) JoinChannel(genesisBlock []byte) error { + p.logger.Info("Joining peer to channel", "peer", p.opts.ID) + var genesisBlockProto cb.Block + err := proto.Unmarshal(genesisBlock, &genesisBlockProto) + if err != nil { + return fmt.Errorf("failed to unmarshal genesis block: %w", err) + } + ctx := context.Background() + tlsCACert, err := p.GetTLSRootCACert(ctx) + if err != nil 
{ + return fmt.Errorf("failed to get TLS root CA cert: %w", err) + } + peerConn, err := p.CreatePeerConnection(ctx, p.opts.ExternalEndpoint, tlsCACert) + if err != nil { + return fmt.Errorf("failed to create peer connection: %w", err) + } + defer peerConn.Close() - # Parameters on creating docker container. - # Container may be efficiently created using ipam & dns-server for cluster - # NetworkMode - sets the networking mode for the container. Supported - # Dns - a list of DNS servers for the container to use. - # Docker Host Config are not supported and will not be used if set. - # LogConfig - sets the logging driver (Type) and related options - # (Config) for Docker. For more info, - # https://docs.docker.com/engine/admin/logging/overview/ - # Note: Set LogConfig using Environment Variables is not supported. - hostConfig: - NetworkMode: host - Dns: - # - 192.168.0.1 - LogConfig: - Type: json-file - Config: - max-size: "50m" - max-file: "5" - Memory: 2147483648 + adminIdentity, _, err := p.GetAdminIdentity(ctx) + if err != nil { + return fmt.Errorf("failed to get admin identity: %w", err) + } -############################################################################### -# -# Chaincode section -# -############################################################################### -chaincode: + err = channel.JoinChannel(ctx, peerConn, adminIdentity, &genesisBlockProto) + if err != nil { + return fmt.Errorf("failed to join channel: %w", err) + } - # The id is used by the Chaincode stub to register the executing Chaincode - # ID with the Peer and is generally supplied through ENV variables - id: - path: - name: + return nil - # Generic builder environment, suitable for most chaincode types - builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) +} - pull: false +// writeCertificatesAndKeys writes the certificates and keys to the MSP directory structure +func (p *LocalPeer) writeCertificatesAndKeys( + mspConfigPath string, + tlsCert *kmodels.KeyResponse, + signCert 
*kmodels.KeyResponse, + tlsKey string, + signKey string, + signCACert *kmodels.KeyResponse, + tlsCACert *kmodels.KeyResponse, +) error { + // Write TLS certificates and keys + if err := os.WriteFile(filepath.Join(mspConfigPath, "tls.crt"), []byte(*tlsCert.Certificate), 0644); err != nil { + return fmt.Errorf("failed to write TLS certificate: %w", err) + } + if err := os.WriteFile(filepath.Join(mspConfigPath, "tls.key"), []byte(tlsKey), 0600); err != nil { + return fmt.Errorf("failed to write TLS key: %w", err) + } - golang: - # golang will never need more than baseos - runtime: $(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION) + // Create and write to signcerts directory + signcertsPath := filepath.Join(mspConfigPath, "signcerts") + if err := os.MkdirAll(signcertsPath, 0755); err != nil { + return fmt.Errorf("failed to create signcerts directory: %w", err) + } + if err := os.WriteFile(filepath.Join(signcertsPath, "cert.pem"), []byte(*signCert.Certificate), 0644); err != nil { + return fmt.Errorf("failed to write signing certificate: %w", err) + } - # whether or not golang chaincode should be linked dynamically - dynamicLink: false + // Write root CA certificate + if err := os.WriteFile(filepath.Join(mspConfigPath, "cacert.pem"), []byte(*signCACert.Certificate), 0644); err != nil { + return fmt.Errorf("failed to write CA certificate: %w", err) + } - java: - # This is an image based on java:openjdk-8 with addition compiler - # tools added for java shim layer packaging. - # This image is packed with shim layer libraries that are necessary - # for Java chaincode runtime. 
- runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) + // Create and write to cacerts directory + cacertsPath := filepath.Join(mspConfigPath, "cacerts") + if err := os.MkdirAll(cacertsPath, 0755); err != nil { + return fmt.Errorf("failed to create cacerts directory: %w", err) + } + if err := os.WriteFile(filepath.Join(cacertsPath, "cacert.pem"), []byte(*signCACert.Certificate), 0644); err != nil { + return fmt.Errorf("failed to write CA certificate to cacerts: %w", err) + } - node: - # This is an image based on node:$(NODE_VER)-alpine - runtime: $(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION) + // Create and write to tlscacerts directory + tlscacertsPath := filepath.Join(mspConfigPath, "tlscacerts") + if err := os.MkdirAll(tlscacertsPath, 0755); err != nil { + return fmt.Errorf("failed to create tlscacerts directory: %w", err) + } + if err := os.WriteFile(filepath.Join(tlscacertsPath, "cacert.pem"), []byte(*tlsCACert.Certificate), 0644); err != nil { + return fmt.Errorf("failed to write TLS CA certificate: %w", err) + } - # List of directories to treat as external builders and launchers for - # chaincode. The external builder detection processing will iterate over the - # builders in the order specified below. - externalBuilders: - - name: ccaas_builder - path: {{.ExternalBuilderPath}} - # The maximum duration to wait for the chaincode build and install process - # to complete. - installTimeout: 8m0s + // Create and write to keystore directory + keystorePath := filepath.Join(mspConfigPath, "keystore") + if err := os.MkdirAll(keystorePath, 0755); err != nil { + return fmt.Errorf("failed to create keystore directory: %w", err) + } + if err := os.WriteFile(filepath.Join(keystorePath, "key.pem"), []byte(signKey), 0600); err != nil { + return fmt.Errorf("failed to write signing key: %w", err) + } - # Timeout duration for starting up a container and waiting for Register - # to come through. 
- startuptimeout: 5m0s + return nil +} - # Timeout duration for Invoke and Init calls to prevent runaway. - # This timeout is used by all chaincodes in all the channels, including - # system chaincodes. - # Note that during Invoke, if the image is not available (e.g. being - # cleaned up when in development environment), the peer will automatically - # build the image, which might take more time. In production environment, - # the chaincode image is unlikely to be deleted, so the timeout could be - # reduced accordingly. - executetimeout: 30s +// setupExternalBuilders creates and configures the external builders for chaincode +func (p *LocalPeer) setupExternalBuilders(mspConfigPath string) error { + // Create external builder directory structure + rootExternalBuilderPath := filepath.Join(mspConfigPath, "ccaas") + binExternalBuilderPath := filepath.Join(rootExternalBuilderPath, "bin") + if err := os.MkdirAll(binExternalBuilderPath, 0755); err != nil { + return fmt.Errorf("failed to create external builder directory: %w", err) + } - # There are 2 modes: "dev" and "net". - # In dev mode, user runs the chaincode after starting peer from - # command line on local machine. - # In net mode, peer will run chaincode in a docker container. - mode: net + // Create build script + buildScript := `#!/bin/bash - # keepalive in seconds. In situations where the communication goes through a - # proxy that does not support keep-alive, this parameter will maintain connection - # between peer and chaincode. - # A value <= 0 turns keepalive off - keepalive: 0 +SOURCE=$1 +OUTPUT=$3 - # enabled system chaincodes - system: - _lifecycle: enable - cscc: enable - lscc: enable - escc: enable - vscc: enable - qscc: enable +#external chaincodes expect connection.json file in the chaincode package +if [ ! 
-f "$SOURCE/connection.json" ]; then + >&2 echo "$SOURCE/connection.json not found" + exit 1 +fi - # Logging section for the chaincode container - logging: - # Default level for all loggers within the chaincode container - level: info - # Override default level for the 'shim' logger - shim: warning - # Format for the chaincode container logs - format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' +#simply copy the endpoint information to specified output location +cp $SOURCE/connection.json $OUTPUT/connection.json -############################################################################### -# -# Ledger section - ledger configuration encompasses both the blockchain -# and the state -# -############################################################################### -ledger: +if [ -d "$SOURCE/metadata" ]; then + cp -a $SOURCE/metadata $OUTPUT/metadata +fi - blockchain: - snapshots: - rootDir: {{.DataPath}}/snapshots +exit 0` - state: - # stateDatabase - options are "goleveldb", "CouchDB" - # goleveldb - default state database stored in goleveldb. - # CouchDB - store state database in CouchDB - stateDatabase: goleveldb - # Limit on the number of records to return per query - totalQueryLimit: 100000 - couchDBConfig: - # It is recommended to run CouchDB on the same server as the peer, and - # not map the CouchDB container port to a server port in docker-compose. - # Otherwise proper security must be provided on the connection between - # CouchDB client (on the peer) and server. - couchDBAddress: 127.0.0.1:5984 - # This username must have read and write authority on CouchDB - username: - # The password is recommended to pass as an environment variable - # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). - # If it is stored here, the file must be access control protected - # to prevent unintended users from discovering the password. 
- password: - # Number of retries for CouchDB errors - maxRetries: 3 - # Number of retries for CouchDB errors during peer startup. - # The delay between retries doubles for each attempt. - # Default of 10 retries results in 11 attempts over 2 minutes. - maxRetriesOnStartup: 10 - # CouchDB request timeout (unit: duration, e.g. 20s) - requestTimeout: 35s - # Limit on the number of records per each CouchDB query - # Note that chaincode queries are only bound by totalQueryLimit. - # Internally the chaincode may execute multiple CouchDB queries, - # each of size internalQueryLimit. - internalQueryLimit: 1000 - # Limit on the number of records per CouchDB bulk update batch - maxBatchUpdateSize: 1000 - # Warm indexes after every N blocks. - # This option warms any indexes that have been - # deployed to CouchDB after every N blocks. - # A value of 1 will warm indexes after every block commit, - # to ensure fast selector queries. - # Increasing the value may improve write efficiency of peer and CouchDB, - # but may degrade query response time. - warmIndexesAfterNBlocks: 1 - # Create the _global_changes system database - # This is optional. Creating the global changes database will require - # additional system resources to track changes and maintain the database - createGlobalChangesDB: false - # CacheSize denotes the maximum mega bytes (MB) to be allocated for the in-memory state - # cache. Note that CacheSize needs to be a multiple of 32 MB. If it is not a multiple - # of 32 MB, the peer would round the size to the next multiple of 32 MB. - # To disable the cache, 0 MB needs to be assigned to the cacheSize. - cacheSize: 64 + if err := os.WriteFile(filepath.Join(binExternalBuilderPath, "build"), []byte(buildScript), 0755); err != nil { + return fmt.Errorf("failed to write build script: %w", err) + } - history: - # enableHistoryDatabase - options are true or false - # Indicates if the history of key updates should be stored. 
- # All history 'index' will be stored in goleveldb, regardless if using - # CouchDB or alternate database for the state. - enableHistoryDatabase: true + // Create detect script + detectScript := `#!/bin/bash - pvtdataStore: - # the maximum db batch size for converting - # the ineligible missing data entries to eligible missing data entries - collElgProcMaxDbBatchSize: 5000 - # the minimum duration (in milliseconds) between writing - # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries - collElgProcDbBatchesInterval: 1000 +METADIR=$2 +# check if the "type" field is set to "external" +# crude way without jq which is not in the default fabric peer image +TYPE=$(tr -d '\n' < "$METADIR/metadata.json" | awk -F':' '{ for (i = 1; i < NF; i++){ if ($i~/type/) { print $(i+1); break }}}'| cut -d\" -f2) -############################################################################### -# -# Operations section -# -############################################################################### -operations: - # host and port for the operations server - listenAddress: 127.0.0.1:9443 +if [ "$TYPE" = "ccaas" ]; then + exit 0 +fi - # TLS configuration for the operations endpoint - tls: - # TLS enabled - enabled: false +exit 1` - # path to PEM encoded server certificate for the operations server - cert: - file: + if err := os.WriteFile(filepath.Join(binExternalBuilderPath, "detect"), []byte(detectScript), 0755); err != nil { + return fmt.Errorf("failed to write detect script: %w", err) + } - # path to PEM encoded server key for the operations server - key: - file: + // Create release script + releaseScript := `#!/bin/bash - # most operations service endpoints require client authentication when TLS - # is enabled. clientAuthRequired requires client certificate authentication - # at the TLS layer to access all resources. 
- clientAuthRequired: false +BLD="$1" +RELEASE="$2" - # paths to PEM encoded ca certificates to trust for client authentication - clientRootCAs: - files: [] +if [ -d "$BLD/metadata" ]; then + cp -a "$BLD/metadata/"* "$RELEASE/" +fi -############################################################################### -# -# Metrics section -# -############################################################################### -metrics: - # metrics provider is one of statsd, prometheus, or disabled - provider: disabled +#external chaincodes expect artifacts to be placed under "$RELEASE"/chaincode/server +if [ -f $BLD/connection.json ]; then + mkdir -p "$RELEASE"/chaincode/server + cp $BLD/connection.json "$RELEASE"/chaincode/server - # statsd configuration - statsd: - # network type: tcp or udp - network: udp + #if tls_required is true, copy TLS files (using above example, the fully qualified path for these fils would be "$RELEASE"/chaincode/server/tls) - # statsd server address - address: 127.0.0.1:8125 + exit 0 +fi - # the interval at which locally cached counters and gauges are pushed - # to statsd; timings are pushed immediately - writeInterval: 10s +exit 1` - # prefix is prepended to all emitted statsd metrics - prefix: + if err := os.WriteFile(filepath.Join(binExternalBuilderPath, "release"), []byte(releaseScript), 0755); err != nil { + return fmt.Errorf("failed to write release script: %w", err) + } + return nil +} + +const configYamlContent = `NodeOUs: + Enable: true + ClientOUIdentifier: + Certificate: cacerts/cacert.pem + OrganizationalUnitIdentifier: client + PeerOUIdentifier: + Certificate: cacerts/cacert.pem + OrganizationalUnitIdentifier: peer + AdminOUIdentifier: + Certificate: cacerts/cacert.pem + OrganizationalUnitIdentifier: admin + OrdererOUIdentifier: + Certificate: cacerts/cacert.pem + OrganizationalUnitIdentifier: orderer ` +// writeConfigFiles writes the config.yaml and core.yaml files +func (p *LocalPeer) writeConfigFiles(mspConfigPath, dataConfigPath 
string) error { + // Write config.yaml + if err := os.WriteFile(filepath.Join(mspConfigPath, "config.yaml"), []byte(configYamlContent), 0644); err != nil { + return fmt.Errorf("failed to write config.yaml: %w", err) + } + convertedOverrides, err := p.convertAddressOverrides(mspConfigPath, p.opts.AddressOverrides) + if err != nil { + return fmt.Errorf("failed to convert address overrides: %w", err) + } + + // Define template data + data := struct { + PeerID string + ListenAddress string + ChaincodeAddress string + ExternalEndpoint string + DataPath string + MSPID string + ExternalBuilderPath string + OperationsListenAddress string + AddressOverrides []AddressOverridePath + }{ + PeerID: p.opts.ID, + ListenAddress: p.opts.ListenAddress, + ChaincodeAddress: p.opts.ChaincodeAddress, + ExternalEndpoint: p.opts.ExternalEndpoint, + DataPath: dataConfigPath, + MSPID: p.mspID, + ExternalBuilderPath: filepath.Join(mspConfigPath, "ccaas"), + OperationsListenAddress: p.opts.OperationsListenAddress, + AddressOverrides: convertedOverrides, + } + // Create template tmpl, err := template.New("core.yaml").Parse(coreYamlTemplate) if err != nil { @@ -1767,6 +1964,10 @@ metrics: return nil } +func (p *LocalPeer) getLogPath() string { + return p.GetStdOutPath() +} + // TailLogs tails the logs of the peer service func (p *LocalPeer) TailLogs(ctx context.Context, tail int, follow bool) (<-chan string, error) { logChan := make(chan string, 100) @@ -1854,12 +2055,7 @@ type CAConfig struct { func (p *LocalPeer) PrepareAdminCertMSP(mspID string) (string, error) { // Create all required directories with proper permissions // Determine admin cert path based on mspID - homeDir, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("failed to get home directory: %w", err) - } - - adminMspPath := filepath.Join(homeDir, ".chainlaunch/orgs", strings.ToLower(mspID), "users/admin") + adminMspPath := filepath.Join(p.configService.GetDataPath(), "orgs", strings.ToLower(mspID), "users/admin") 
// Check if admin cert directory already exists if _, err := os.Stat(adminMspPath); err == nil { @@ -1949,330 +2145,816 @@ func (p *LocalPeer) PrepareAdminCertMSP(mspID string) (string, error) { return "", fmt.Errorf("failed to write config.yaml: %w", err) } - return adminMspPath, nil -} - -// LeaveChannel removes the peer from a channel -func (p *LocalPeer) LeaveChannel(channelID string) error { - err := p.Stop() + return adminMspPath, nil +} + +// LeaveChannel removes the peer from a channel +func (p *LocalPeer) LeaveChannel(channelID string) error { + err := p.Stop() + if err != nil { + return fmt.Errorf("failed to stop peer: %w", err) + } + + p.logger.Info("Removing peer from channel", "peer", p.opts.ID, "channel", channelID) + + // Build peer channel remove command + peerBinary, err := p.findPeerBinary() + if err != nil { + return fmt.Errorf("failed to find peer binary: %w", err) + } + peerPath := p.getPeerPath() + peerConfigPath := filepath.Join(peerPath, "config") + cmd := exec.Command(peerBinary, "node", "unjoin", "-c", channelID) + listenAddress := strings.Replace(p.opts.ListenAddress, "0.0.0.0", "localhost", 1) + + // Set environment variables + cmd.Env = append(os.Environ(), + fmt.Sprintf("CORE_PEER_MSPCONFIGPATH=%s", peerConfigPath), + fmt.Sprintf("CORE_PEER_ADDRESS=%s", listenAddress), + fmt.Sprintf("CORE_PEER_LOCALMSPID=%s", p.mspID), + "CORE_PEER_TLS_ENABLED=true", + fmt.Sprintf("FABRIC_CFG_PATH=%s", peerConfigPath), + ) + + // Execute command + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to remove channel: %w, output: %s", err, string(output)) + } + + p.logger.Info("Successfully removed channel", "peer", p.opts.ID, "channel", channelID) + _, err = p.Start() + if err != nil { + return fmt.Errorf("failed to start peer: %w", err) + } + return nil +} +func (p *LocalPeer) GetPeerURL() string { + return fmt.Sprintf("grpcs://%s", p.opts.ExternalEndpoint) +} + +func (p *LocalPeer) GetPeerAddress() string { + return 
p.opts.ExternalEndpoint +} + +func (p *LocalPeer) GetTLSRootCACert(ctx context.Context) (string, error) { + tlsCAKeyDB, err := p.keyService.GetKey(ctx, int(p.org.TlsRootKeyID.Int64)) + if err != nil { + return "", fmt.Errorf("failed to get TLS CA key: %w", err) + } + if tlsCAKeyDB.Certificate == nil { + return "", fmt.Errorf("TLS CA key is not set") + } + return *tlsCAKeyDB.Certificate, nil +} + +func (p *LocalPeer) GetSignRootCACert(ctx context.Context) (string, error) { + signCAKeyDB, err := p.keyService.GetKey(ctx, int(p.org.SignKeyID.Int64)) + if err != nil { + return "", fmt.Errorf("failed to get TLS CA key: %w", err) + } + if signCAKeyDB.Certificate == nil { + return "", fmt.Errorf("TLS CA key is not set") + } + return *signCAKeyDB.Certificate, nil +} + +type SaveChannelConfigResponse struct { + TransactionID string +} + +func (p *LocalPeer) SaveChannelConfig(ctx context.Context, channelID string, ordererUrl string, ordererTlsCACert string, channelData *cb.Envelope) (*SaveChannelConfigResponse, error) { + adminIdentity, _, err := p.GetAdminIdentity(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get admin identity: %w", err) + } + envelope, err := SignConfigTx(channelID, channelData, adminIdentity) + if err != nil { + return nil, fmt.Errorf("failed to set anchor peers: %w", err) + } + + ordererConn, err := p.CreateOrdererConnection(ctx, ordererUrl, ordererTlsCACert) + if err != nil { + return nil, fmt.Errorf("failed to create orderer connection: %w", err) + } + defer ordererConn.Close() + ordererClient, err := orderer.NewAtomicBroadcastClient(ordererConn).Broadcast(context.Background()) + if err != nil { + return nil, fmt.Errorf("failed to create orderer client: %w", err) + } + err = ordererClient.Send(envelope) + if err != nil { + return nil, fmt.Errorf("failed to send envelope: %w", err) + } + response, err := ordererClient.Recv() + if err != nil { + return nil, fmt.Errorf("failed to receive response: %w", err) + } + return 
&SaveChannelConfigResponse{ + TransactionID: response.String(), + }, nil +} + +// SaveChannelConfigResponse contains the transaction ID of the saved channel configuration + +// CreateOrdererConnection establishes a gRPC connection to an orderer +func (p *LocalPeer) CreateOrdererConnection(ctx context.Context, ordererURL string, ordererTLSCACert string) (*grpc.ClientConn, error) { + p.logger.Info("Creating orderer connection", + "ordererURL", ordererURL) + + // Create a network node with the orderer details + networkNode := network.Node{ + Addr: ordererURL, + TLSCACertByte: []byte(ordererTLSCACert), + } + + // Establish connection to the orderer + ordererConn, err := network.DialConnection(networkNode) + if err != nil { + return nil, fmt.Errorf("failed to dial orderer connection: %w", err) + } + + return ordererConn, nil +} + +const ( + msgVersion = int32(0) + epoch = 0 +) + +func SignConfigTx(channelID string, envConfigUpdate *cb.Envelope, signer identity.SigningIdentity) (*cb.Envelope, error) { + payload, err := protoutil.UnmarshalPayload(envConfigUpdate.Payload) + if err != nil { + return nil, errors.New("bad payload") + } + + if payload.Header == nil || payload.Header.ChannelHeader == nil { + return nil, errors.New("bad header") + } + + ch, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader) + if err != nil { + return nil, errors.New("could not unmarshall channel header") + } + + if ch.Type != int32(cb.HeaderType_CONFIG_UPDATE) { + return nil, errors.New("bad type") + } + + if ch.ChannelId == "" { + return nil, errors.New("empty channel id") + } + + configUpdateEnv, err := protoutil.UnmarshalConfigUpdateEnvelope(payload.Data) + if err != nil { + return nil, errors.New("bad config update env") + } + + sigHeader, err := protoutil.NewSignatureHeader(signer) + if err != nil { + return nil, err + } + + configSig := &cb.ConfigSignature{ + SignatureHeader: protoutil.MarshalOrPanic(sigHeader), + } + + configSig.Signature, err = 
signer.Sign(Concatenate(configSig.SignatureHeader, configUpdateEnv.ConfigUpdate)) + if err != nil { + return nil, err + } + + configUpdateEnv.Signatures = append(configUpdateEnv.Signatures, configSig) + + return protoutil.CreateSignedEnvelope(cb.HeaderType_CONFIG_UPDATE, channelID, signer, configUpdateEnv, msgVersion, epoch) +} + +func Concatenate[T any](slices ...[]T) []T { + size := 0 + for _, slice := range slices { + size += len(slice) + } + + result := make([]T, size) + i := 0 + for _, slice := range slices { + copy(result[i:], slice) + i += len(slice) + } + + return result +} + +// CreatePeerConnection establishes a gRPC connection to a peer +func (p *LocalPeer) CreatePeerConnection(ctx context.Context, peerURL string, peerTLSCACert string) (*grpc.ClientConn, error) { + // Create a temporary file for the TLS CA certificate + + networkNode := network.Node{ + Addr: peerURL, + TLSCACertByte: []byte(peerTLSCACert), + } + peerConn, err := network.DialConnection(networkNode) + if err != nil { + return nil, fmt.Errorf("failed to dial peer connection: %w", err) + } + return peerConn, nil +} + +func (p *LocalPeer) GetMSPID() string { + return p.mspID +} +func (p *LocalPeer) GetAdminIdentity(ctx context.Context) (identity.SigningIdentity, gwidentity.Sign, error) { + adminSignKeyDB, err := p.keyService.GetKey(ctx, int(p.org.AdminSignKeyID.Int64)) + if err != nil { + return nil, nil, fmt.Errorf("failed to get TLS CA key: %w", err) + } + if adminSignKeyDB.Certificate == nil { + return nil, nil, fmt.Errorf("TLS CA key is not set") + } + certificate := *adminSignKeyDB.Certificate + privateKey, err := p.keyService.GetDecryptedPrivateKey(int(p.org.AdminSignKeyID.Int64)) + if err != nil { + return nil, nil, fmt.Errorf("failed to get decrypted private key: %w", err) + } + + cert, err := gwidentity.CertificateFromPEM([]byte(certificate)) + if err != nil { + return nil, nil, fmt.Errorf("failed to read certificate: %w", err) + } + + priv, err := 
gwidentity.PrivateKeyFromPEM([]byte(privateKey)) + if err != nil { + return nil, nil, fmt.Errorf("failed to read private key: %w", err) + } + + signingIdentity, err := identity.NewPrivateKeySigningIdentity(p.mspID, cert, priv) + if err != nil { + return nil, nil, fmt.Errorf("failed to create signing identity: %w", err) + } + + signer, err := gwidentity.NewPrivateKeySign(priv) + if err != nil { + return nil, nil, fmt.Errorf("failed to create signer: %w", err) + } + return signingIdentity, signer, nil +} + +// Add this struct near the top with other type definitions +type GetChannelConfigResponse struct { + ChannelGroup *cb.Config +} + +// Add this new method to the LocalPeer struct +func (p *LocalPeer) GetChannelBlock(ctx context.Context, channelID string, ordererUrl string, ordererTlsCACert string) (*cb.Block, error) { + p.logger.Info("Fetching channel config", + "peer", p.opts.ID, + "channel", channelID, + "ordererUrl", ordererUrl) + + // Get admin identity + adminIdentity, _, err := p.GetAdminIdentity(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get admin identity: %w", err) + } + peerUrl := p.GetPeerAddress() + peerTLSCACert, err := p.GetTLSRootCACert(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get TLS CA cert: %w", err) + } + peerConn, err := p.CreatePeerConnection(ctx, peerUrl, peerTLSCACert) + if err != nil { + return nil, fmt.Errorf("failed to create peer connection: %w", err) + } + defer peerConn.Close() + // Fetch channel configuration + configBlock, err := channel.GetConfigBlock(ctx, peerConn, adminIdentity, channelID) + if err != nil { + return nil, fmt.Errorf("failed to query channel config: %w", err) + } + return configBlock, nil + +} + +// getOrdererTLSKeyPair creates a TLS key pair for secure communication with the orderer +func (p *LocalPeer) getOrdererTLSKeyPair(ctx context.Context, ordererTLSCert string) (tls.Certificate, error) { + // Get organization details + org, err := p.orgService.GetOrganizationByMspID(ctx, 
p.mspID) + if err != nil { + return tls.Certificate{}, fmt.Errorf("failed to get organization: %w", err) + } + + if !org.AdminSignKeyID.Valid { + return tls.Certificate{}, fmt.Errorf("organization has no admin sign key") + } + + // Get private key from key management service + privateKeyPEM, err := p.keyService.GetDecryptedPrivateKey(int(org.AdminSignKeyID.Int64)) + if err != nil { + return tls.Certificate{}, fmt.Errorf("failed to get private key: %w", err) + } + + // Parse the orderer TLS certificate + ordererTLSCertParsed, err := tls.X509KeyPair([]byte(ordererTLSCert), []byte(privateKeyPEM)) + if err != nil { + return tls.Certificate{}, fmt.Errorf("failed to parse orderer TLS certificate: %w", err) + } + + return ordererTLSCertParsed, nil +} + +// Add this new method to the LocalPeer struct +func (p *LocalPeer) GetChannelConfig(ctx context.Context, channelID string, ordererUrl string, ordererTlsCACert string) (*GetChannelConfigResponse, error) { + + // Fetch channel configuration + configBlock, err := p.GetChannelBlock(ctx, channelID, ordererUrl, ordererTlsCACert) + if err != nil { + return nil, fmt.Errorf("failed to query channel config: %w", err) + } + + cmnConfig, err := ExtractConfigFromBlock(configBlock) + if err != nil { + return nil, fmt.Errorf("failed to extract config from block: %w", err) + } + return &GetChannelConfigResponse{ + ChannelGroup: cmnConfig, + }, nil +} + +// SaveChannelConfigWithSignaturesResponse represents the response from saving a channel config with signatures +type SaveChannelConfigWithSignaturesResponse struct { + TransactionID string +} + +// SaveChannelConfigWithSignatures submits a config update envelope with signatures to the orderer +func (p *LocalPeer) SaveChannelConfigWithSignatures( + ctx context.Context, + channelID string, + ordererUrl string, + ordererTlsCACert string, + envelopeBytes []byte, + signatures [][]byte, +) (*SaveChannelConfigWithSignaturesResponse, error) { + var cbEnvelope *cb.Envelope + if err := 
proto.Unmarshal(envelopeBytes, cbEnvelope); err != nil { + return nil, fmt.Errorf("failed to unmarshal envelope: %w", err) + } + + signedEnvelope, err := protoutil.FormSignedEnvelope(cb.HeaderType_CONFIG_UPDATE, channelID, cbEnvelope, signatures, 1, 0) if err != nil { - return fmt.Errorf("failed to stop peer: %w", err) + return nil, fmt.Errorf("failed to form signed envelope: %w", err) } - p.logger.Info("Removing peer from channel", "peer", p.opts.ID, "channel", channelID) - - // Build peer channel remove command - peerBinary, err := p.findPeerBinary() + ordererConn, err := p.CreateOrdererConnection(ctx, ordererUrl, ordererTlsCACert) if err != nil { - return fmt.Errorf("failed to find peer binary: %w", err) + return nil, fmt.Errorf("failed to create orderer connection: %w", err) } - peerPath := p.getPeerPath() - peerConfigPath := filepath.Join(peerPath, "config") - cmd := exec.Command(peerBinary, "node", "unjoin", "-c", channelID) - listenAddress := strings.Replace(p.opts.ListenAddress, "0.0.0.0", "localhost", 1) - - // Set environment variables - cmd.Env = append(os.Environ(), - fmt.Sprintf("CORE_PEER_MSPCONFIGPATH=%s", peerConfigPath), - fmt.Sprintf("CORE_PEER_ADDRESS=%s", listenAddress), - fmt.Sprintf("CORE_PEER_LOCALMSPID=%s", p.mspID), - "CORE_PEER_TLS_ENABLED=true", - fmt.Sprintf("FABRIC_CFG_PATH=%s", peerConfigPath), - ) - - // Execute command - output, err := cmd.CombinedOutput() + defer ordererConn.Close() + ordererClient, err := orderer.NewAtomicBroadcastClient(ordererConn).Broadcast(context.Background()) if err != nil { - return fmt.Errorf("failed to remove channel: %w, output: %s", err, string(output)) + return nil, fmt.Errorf("failed to create orderer client: %w", err) } - p.logger.Info("Successfully removed channel", "peer", p.opts.ID, "channel", channelID) - _, err = p.Start() + err = ordererClient.Send(signedEnvelope) if err != nil { - return fmt.Errorf("failed to start peer: %w", err) + return nil, fmt.Errorf("failed to save channel config with 
signatures: %w", err) } - return nil + response, err := ordererClient.Recv() + if err != nil { + return nil, fmt.Errorf("failed to receive response: %w", err) + } + return &SaveChannelConfigWithSignaturesResponse{ + TransactionID: response.String(), + }, nil } -func (p *LocalPeer) GetPeerURL() string { - return fmt.Sprintf("grpcs://%s", p.opts.ExternalEndpoint) + +type PeerChannel struct { + Name string `json:"name"` + BlockNum int64 `json:"blockNum"` + CreatedAt time.Time `json:"createdAt"` } -func (p *LocalPeer) GetTLSRootCACert(ctx context.Context) (string, error) { - tlsCAKeyDB, err := p.keyService.GetKey(ctx, int(p.org.TlsRootKeyID.Int64)) +// GetChannels returns a list of channels the peer has joined +func (p *LocalPeer) GetChannels(ctx context.Context) ([]PeerChannel, error) { + peerUrl := p.GetPeerAddress() + tlsCACert, err := p.GetTLSRootCACert(ctx) if err != nil { - return "", fmt.Errorf("failed to get TLS CA key: %w", err) + return nil, fmt.Errorf("failed to get TLS CA cert: %w", err) } - if tlsCAKeyDB.Certificate == nil { - return "", fmt.Errorf("TLS CA key is not set") + peerConn, err := p.CreatePeerConnection(ctx, peerUrl, tlsCACert) + if err != nil { + return nil, fmt.Errorf("failed to create peer connection: %w", err) } - return *tlsCAKeyDB.Certificate, nil + defer peerConn.Close() + adminIdentity, _, err := p.GetAdminIdentity(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get admin identity: %w", err) + } + + channelList, err := channel.ListChannelOnPeer(ctx, peerConn, adminIdentity) + if err != nil { + return nil, fmt.Errorf("failed to list channels on peer: %w", err) + } + + channels := make([]PeerChannel, len(channelList)) + for i, channel := range channelList { + blockInfo, err := p.GetChannelInfoOnPeer(ctx, channel.ChannelId) + if err != nil { + channels[i] = PeerChannel{ + Name: channel.ChannelId, + BlockNum: 0, + CreatedAt: time.Now(), + } + } else { + channels[i] = PeerChannel{ + Name: channel.ChannelId, + BlockNum: 
int64(blockInfo.Height), + CreatedAt: time.Now(), + } + } + } + return channels, nil } -func (p *LocalPeer) GetSignRootCACert(ctx context.Context) (string, error) { - signCAKeyDB, err := p.keyService.GetKey(ctx, int(p.org.SignKeyID.Int64)) +// getChannelBlockInfo gets the current block height for a channel +func (p *LocalPeer) getChannelBlockInfo(ctx context.Context, channelID string) (*cb.BlockchainInfo, error) { + peerUrl := p.GetPeerAddress() + tlsCACert, err := p.GetTLSRootCACert(ctx) if err != nil { - return "", fmt.Errorf("failed to get TLS CA key: %w", err) + return nil, fmt.Errorf("failed to get TLS CA cert: %w", err) } - if signCAKeyDB.Certificate == nil { - return "", fmt.Errorf("TLS CA key is not set") + peerConn, err := p.CreatePeerConnection(ctx, peerUrl, tlsCACert) + if err != nil { + return nil, fmt.Errorf("failed to create peer connection: %w", err) } - return *signCAKeyDB.Certificate, nil + defer peerConn.Close() + adminIdentity, _, err := p.GetAdminIdentity(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get admin identity: %w", err) + } + + // Query info for the channel + channelInfo, err := channel.GetBlockChainInfo(ctx, peerConn, adminIdentity, channelID) + if err != nil { + return nil, fmt.Errorf("failed to query channel info: %w", err) + } + + // Get the block number from the channel info + + return channelInfo, nil } -type SaveChannelConfigResponse struct { - TransactionID string +// ExtractConfigFromBlock extracts channel configuration from block +func ExtractConfigFromBlock(block *cb.Block) (*cb.Config, error) { + if block == nil || block.Data == nil || len(block.Data.Data) == 0 { + return nil, errors.New("invalid block") + } + blockPayload := block.Data.Data[0] + + envelope := &cb.Envelope{} + if err := proto.Unmarshal(blockPayload, envelope); err != nil { + return nil, err + } + payload := &cb.Payload{} + if err := proto.Unmarshal(envelope.Payload, payload); err != nil { + return nil, err + } + + cfgEnv := &cb.ConfigEnvelope{} 
+ if err := proto.Unmarshal(payload.Data, cfgEnv); err != nil { + return nil, err + } + return cfgEnv.Config, nil } -func (p *LocalPeer) SaveChannelConfig(ctx context.Context, channelID string, ordererUrl string, ordererTlsCACert string, channelData []byte) (*SaveChannelConfigResponse, error) { - peerUrl := p.GetPeerURL() +// GetBlock retrieves a specific block by its number +func (p *LocalPeer) GetBlock(ctx context.Context, channelID string, blockNum uint64) (*cb.Block, error) { + peerUrl := p.GetPeerAddress() tlsCACert, err := p.GetTLSRootCACert(ctx) if err != nil { return nil, fmt.Errorf("failed to get TLS CA cert: %w", err) } + peerConn, err := p.CreatePeerConnection(ctx, peerUrl, tlsCACert) + if err != nil { + return nil, fmt.Errorf("failed to create peer connection: %w", err) + } + defer peerConn.Close() + adminIdentity, signer, err := p.GetAdminIdentity(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get admin identity: %w", err) + } + gateway, err := client.Connect(adminIdentity, client.WithClientConnection(peerConn), client.WithSign(signer)) + if err != nil { + return nil, fmt.Errorf("failed to connect to gateway: %w", err) + } + defer gateway.Close() + network := gateway.GetNetwork(channelID) + blockEvents, err := network.BlockAndPrivateDataEvents(ctx, client.WithStartBlock(blockNum)) + if err != nil { + return nil, fmt.Errorf("failed to get block: %w", err) + } - networkConfig, err := p.generateNetworkConfigForPeer( - peerUrl, - p.mspID, - tlsCACert, - ordererUrl, - ordererTlsCACert, - ) + for blockEvent := range blockEvents { + return blockEvent.Block, nil + } + return nil, fmt.Errorf("block not found") +} + + +func (p *LocalPeer) GetBlockTransactions(ctx context.Context, channelID string, blockNum uint64) ([]*cb.Envelope, error) { + peerUrl := p.GetPeerAddress() + tlsCACert, err := p.GetTLSRootCACert(ctx) if err != nil { - return nil, fmt.Errorf("failed to generate network config: %w", err) + return nil, fmt.Errorf("failed to get TLS CA 
cert: %w", err) } - configBackend := config.FromRaw([]byte(networkConfig.NetworkConfig), "yaml") - sdk, err := fabsdk.New(configBackend) + peerConn, err := p.CreatePeerConnection(ctx, peerUrl, tlsCACert) if err != nil { - return nil, fmt.Errorf("failed to create sdk: %w", err) + return nil, fmt.Errorf("failed to create peer connection: %w", err) } - defer sdk.Close() - adminIdentity, err := p.GetAdminIdentity(ctx, sdk) + defer peerConn.Close() + adminIdentity, signer, err := p.GetAdminIdentity(ctx) if err != nil { return nil, fmt.Errorf("failed to get admin identity: %w", err) } - sdkContext := sdk.Context( - fabsdk.WithIdentity(adminIdentity), - fabsdk.WithOrg(p.mspID), - ) - resClient, err := resmgmt.New(sdkContext) + gateway, err := client.Connect(adminIdentity, client.WithClientConnection(peerConn), client.WithSign(signer)) if err != nil { - return nil, fmt.Errorf("failed to create resmgmt client: %w", err) + return nil, fmt.Errorf("failed to connect to gateway: %w", err) } - configUpdateReader := bytes.NewReader(channelData) - chResponse, err := resClient.SaveChannel(resmgmt.SaveChannelRequest{ - ChannelID: channelID, - ChannelConfig: configUpdateReader, - }) + defer gateway.Close() + network := gateway.GetNetwork(channelID) + blockEvents, err := network.BlockAndPrivateDataEvents(ctx, client.WithStartBlock(blockNum)) if err != nil { - return nil, fmt.Errorf("failed to save channel config: %w", err) + return nil, fmt.Errorf("failed to get block: %w", err) } - return &SaveChannelConfigResponse{ - TransactionID: string(chResponse.TransactionID), - }, nil -} -func (p *LocalPeer) GetMSPID() string { - return p.mspID + for blockEvent := range blockEvents { + var transactions []*cb.Envelope + for _, data := range blockEvent.Block.Data.Data { + envelope := &cb.Envelope{} + if err := proto.Unmarshal(data, envelope); err != nil { + return nil, fmt.Errorf("failed to unmarshal transaction envelope: %w", err) + } + transactions = append(transactions, envelope) + } + return 
transactions, nil + } + return nil, fmt.Errorf("block not found") } -func (p *LocalPeer) GetAdminIdentity(ctx context.Context, sdk *fabsdk.FabricSDK) (msp.SigningIdentity, error) { - adminSignKeyDB, err := p.keyService.GetKey(ctx, int(p.org.AdminSignKeyID.Int64)) + +// GetBlocksInRange retrieves blocks from startBlock to endBlock (inclusive) +func (p *LocalPeer) GetBlocksInRange(ctx context.Context, channelID string, startBlock, endBlock uint64) ([]*cb.Block, error) { + peerUrl := p.GetPeerAddress() + tlsCACert, err := p.GetTLSRootCACert(ctx) if err != nil { - return nil, fmt.Errorf("failed to get TLS CA key: %w", err) + return nil, fmt.Errorf("failed to get TLS CA cert: %w", err) } - if adminSignKeyDB.Certificate == nil { - return nil, fmt.Errorf("TLS CA key is not set") + peerConn, err := p.CreatePeerConnection(ctx, peerUrl, tlsCACert) + if err != nil { + return nil, fmt.Errorf("failed to create peer connection: %w", err) } - certificate := *adminSignKeyDB.Certificate - privateKey, err := p.keyService.GetDecryptedPrivateKey(int(p.org.AdminSignKeyID.Int64)) + defer peerConn.Close() + adminIdentity, signer, err := p.GetAdminIdentity(ctx) if err != nil { - return nil, fmt.Errorf("failed to get decrypted private key: %w", err) + return nil, fmt.Errorf("failed to get admin identity: %w", err) } - sdkConfig, err := sdk.Config() + gateway, err := client.Connect(adminIdentity, client.WithClientConnection(peerConn), client.WithSign(signer)) if err != nil { - return nil, fmt.Errorf("failed to get sdk config: %w", err) + return nil, fmt.Errorf("failed to connect to gateway: %w", err) } - cryptoConfig := cryptosuite.ConfigFromBackend(sdkConfig) - cryptoSuite, err := sw.GetSuiteByConfig(cryptoConfig) + defer gateway.Close() + + network := gateway.GetNetwork(channelID) + blockEvents, err := network.BlockEvents(ctx, client.WithStartBlock(startBlock)) if err != nil { - return nil, fmt.Errorf("failed to get crypto suite: %w", err) + return nil, fmt.Errorf("failed to get blocks: 
%w", err) + } + + var blocks []*cb.Block + blockCount := uint64(0) + maxBlocks := endBlock - startBlock + 1 + + for blockEvent := range blockEvents { + blocks = append(blocks, blockEvent) + blockCount++ + + if blockCount >= maxBlocks || blockEvent.Header.Number >= endBlock { + break + } + } + + if len(blocks) == 0 { + return nil, fmt.Errorf("no blocks found in range %d to %d", startBlock, endBlock) } - userStore := mspimpl.NewMemoryUserStore() - endpointConfig, err := fab.ConfigFromBackend(sdkConfig) + + return blocks, nil +} + +// GetChannelBlockInfo retrieves information about the blockchain for a specific channel +func (p *LocalPeer) GetChannelBlockInfo(ctx context.Context, channelID string) (*BlockInfo, error) { + peerUrl := p.GetPeerAddress() + tlsCACert, err := p.GetTLSRootCACert(ctx) if err != nil { - return nil, fmt.Errorf("failed to get endpoint config: %w", err) + return nil, fmt.Errorf("failed to get TLS CA cert: %w", err) } - mspID := p.GetMSPID() - identityManager, err := mspimpl.NewIdentityManager(mspID, userStore, cryptoSuite, endpointConfig) + + peerConn, err := p.CreatePeerConnection(ctx, peerUrl, tlsCACert) if err != nil { - return nil, fmt.Errorf("failed to get identity manager: %w", err) + return nil, fmt.Errorf("failed to create peer connection: %w", err) } - signingIdentity, err := identityManager.CreateSigningIdentity( - msp.WithPrivateKey([]byte(privateKey)), - msp.WithCert([]byte(certificate)), - ) + defer peerConn.Close() + + adminIdentity, _, err := p.GetAdminIdentity(ctx) if err != nil { - return nil, fmt.Errorf("failed to create signing identity: %w", err) + return nil, fmt.Errorf("failed to get admin identity: %w", err) + } + blockInfo, err := channel.GetBlockChainInfo(ctx, peerConn, adminIdentity, channelID) + if err != nil { + return nil, fmt.Errorf("failed to get block chain info: %w", err) } - return signingIdentity, nil -} -// Add this struct near the top with other type definitions -type GetChannelConfigResponse struct { - 
ChannelGroup *cb.Config + return &BlockInfo{ + Height: blockInfo.Height, + CurrentBlockHash: fmt.Sprintf("%x", blockInfo.CurrentBlockHash), + PreviousBlockHash: fmt.Sprintf("%x", blockInfo.PreviousBlockHash), + }, nil + } -// Add this new method to the LocalPeer struct -func (p *LocalPeer) GetChannelBlock(ctx context.Context, channelID string, ordererUrl string, ordererTlsCACert string) (*cb.Block, error) { - p.logger.Info("Fetching channel config", - "peer", p.opts.ID, - "channel", channelID, - "ordererUrl", ordererUrl) +const ( + qscc = "qscc" + qsccTransactionByID = "GetTransactionByID" + qsccChannelInfo = "GetChainInfo" + qsccBlockByHash = "GetBlockByHash" + qsccBlockByNumber = "GetBlockByNumber" + qsccBlockByTxID = "GetBlockByTxID" +) - // Get peer URL and TLS cert - peerUrl := p.GetPeerURL() +// GetBlockByTxID retrieves a block containing the specified transaction ID +func (p *LocalPeer) GetBlockByTxID(ctx context.Context, channelID string, txID string) (*cb.Block, error) { + peerUrl := p.GetPeerAddress() tlsCACert, err := p.GetTLSRootCACert(ctx) if err != nil { return nil, fmt.Errorf("failed to get TLS CA cert: %w", err) } - // Generate network config for SDK - networkConfig, err := p.generateNetworkConfigForPeer( - peerUrl, - p.mspID, - tlsCACert, - ordererUrl, - ordererTlsCACert, - ) + peerConn, err := p.CreatePeerConnection(ctx, peerUrl, tlsCACert) if err != nil { - return nil, fmt.Errorf("failed to generate network config: %w", err) + return nil, fmt.Errorf("failed to create peer connection: %w", err) } + defer peerConn.Close() - // Initialize SDK with network config - configBackend := config.FromRaw([]byte(networkConfig.NetworkConfig), "yaml") - sdk, err := fabsdk.New(configBackend) + adminIdentity, signer, err := p.GetAdminIdentity(ctx) if err != nil { - return nil, fmt.Errorf("failed to create sdk: %w", err) + return nil, fmt.Errorf("failed to get admin identity: %w", err) } - defer sdk.Close() - // Get admin identity - adminIdentity, err := 
p.GetAdminIdentity(ctx, sdk) + gateway, err := client.Connect(adminIdentity, client.WithClientConnection(peerConn), client.WithSign(signer)) if err != nil { - return nil, fmt.Errorf("failed to get admin identity: %w", err) + return nil, fmt.Errorf("failed to connect to gateway: %w", err) } - - // Create SDK context with admin identity - sdkContext := sdk.Context( - fabsdk.WithIdentity(adminIdentity), - fabsdk.WithOrg(p.mspID), - ) - - // Create resource management client - resClient, err := resmgmt.New(sdkContext) + defer gateway.Close() + network := gateway.GetNetwork(channelID) + contract := network.GetContract(qscc) + response, err := contract.EvaluateTransaction(qsccBlockByTxID, channelID, txID) if err != nil { - return nil, fmt.Errorf("failed to create resmgmt client: %w", err) + return nil, fmt.Errorf("failed to query block by transaction ID: %w", err) } - // Fetch channel configuration - configBlock, err := resClient.QueryConfigBlockFromOrderer(channelID) - if err != nil { - return nil, fmt.Errorf("failed to query channel config: %w", err) + // Unmarshal block + block := &cb.Block{} + if err := proto.Unmarshal(response, block); err != nil { + return nil, fmt.Errorf("failed to unmarshal block: %w", err) } - return configBlock, nil + return block, nil } -// Add this new method to the LocalPeer struct -func (p *LocalPeer) GetChannelConfig(ctx context.Context, channelID string, ordererUrl string, ordererTlsCACert string) (*GetChannelConfigResponse, error) { +// GetBlockByTxID retrieves a block containing the specified transaction ID +func (p *LocalPeer) GetChannelInfoOnPeer(ctx context.Context, channelID string) (*cb.BlockchainInfo, error) { + peerUrl := p.GetPeerAddress() + tlsCACert, err := p.GetTLSRootCACert(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get TLS CA cert: %w", err) + } - // Fetch channel configuration - configBlock, err := p.GetChannelBlock(ctx, channelID, ordererUrl, ordererTlsCACert) + peerConn, err := p.CreatePeerConnection(ctx, 
peerUrl, tlsCACert) if err != nil { - return nil, fmt.Errorf("failed to query channel config: %w", err) + return nil, fmt.Errorf("failed to create peer connection: %w", err) } + defer peerConn.Close() - cmnConfig, err := resource.ExtractConfigFromBlock(configBlock) + adminIdentity, signer, err := p.GetAdminIdentity(ctx) if err != nil { - return nil, fmt.Errorf("failed to extract config from block: %w", err) + return nil, fmt.Errorf("failed to get admin identity: %w", err) } - return &GetChannelConfigResponse{ - ChannelGroup: cmnConfig, - }, nil -} -// SaveChannelConfigWithSignaturesResponse represents the response from saving a channel config with signatures -type SaveChannelConfigWithSignaturesResponse struct { - TransactionID string + gateway, err := client.Connect(adminIdentity, client.WithClientConnection(peerConn), client.WithSign(signer)) + if err != nil { + return nil, fmt.Errorf("failed to connect to gateway: %w", err) + } + defer gateway.Close() + network := gateway.GetNetwork(channelID) + contract := network.GetContract(qscc) + response, err := contract.EvaluateTransaction(qsccChannelInfo, channelID) + if err != nil { + return nil, fmt.Errorf("failed to query block by transaction ID: %w", err) + } + p.logger.Info("Channel info", "response", response) + bci := &cb.BlockchainInfo{} + if err := proto.Unmarshal(response, bci); err != nil { + return nil, fmt.Errorf("failed to unmarshal block chain info: %w", err) + } + return bci, nil } -// SaveChannelConfigWithSignatures submits a config update envelope with signatures to the orderer -func (p *LocalPeer) SaveChannelConfigWithSignatures( - ctx context.Context, - channelID string, - ordererUrl string, - ordererTlsCACert string, - envelopeBytes []byte, - signatures [][]byte, -) (*SaveChannelConfigWithSignaturesResponse, error) { - // Create a network config for the SDK - netConfig, err := p.generateNetworkConfigForPeer( - p.GetPeerURL(), - p.mspID, - "", // We don't need the peer TLS CA cert for this operation - 
ordererUrl, - ordererTlsCACert, - ) +// SynchronizeConfig synchronizes the peer's configuration files and service +func (p *LocalPeer) SynchronizeConfig(deployConfig *types.FabricPeerDeploymentConfig) error { + slugifiedID := strings.ReplaceAll(strings.ToLower(p.opts.ID), " ", "-") + dirPath := filepath.Join(p.configService.GetDataPath(), "peers", slugifiedID) + mspConfigPath := filepath.Join(dirPath, "config") + dataConfigPath := filepath.Join(dirPath, "data") + // Write config.yaml + if err := os.WriteFile(filepath.Join(mspConfigPath, "config.yaml"), []byte(configYamlContent), 0644); err != nil { + return fmt.Errorf("failed to write config.yaml: %w", err) + } + convertedOverrides, err := p.convertAddressOverrides(mspConfigPath, deployConfig.AddressOverrides) if err != nil { - return nil, fmt.Errorf("failed to generate network config: %w", err) + return fmt.Errorf("failed to convert address overrides: %w", err) } - // Initialize the SDK with the network config - configProvider := config.FromRaw([]byte(netConfig.NetworkConfig), "yaml") - sdk, err := fabsdk.New(configProvider) + // Define template data + data := struct { + PeerID string + ListenAddress string + ChaincodeAddress string + ExternalEndpoint string + DataPath string + MSPID string + ExternalBuilderPath string + OperationsListenAddress string + AddressOverrides []AddressOverridePath + }{ + PeerID: p.opts.ID, + ListenAddress: deployConfig.ListenAddress, + ChaincodeAddress: deployConfig.ChaincodeAddress, + ExternalEndpoint: deployConfig.ExternalEndpoint, + DataPath: dataConfigPath, + MSPID: deployConfig.MSPID, + ExternalBuilderPath: filepath.Join(mspConfigPath, "ccaas"), + OperationsListenAddress: deployConfig.OperationsListenAddress, + AddressOverrides: convertedOverrides, + } + // Create template + tmpl, err := template.New("core.yaml").Parse(coreYamlTemplate) if err != nil { - return nil, fmt.Errorf("failed to create SDK: %w", err) + return fmt.Errorf("failed to parse core.yaml template: %w", err) } - 
defer sdk.Close() - // Get admin identity for signing - adminIdentity, err := p.GetAdminIdentity(ctx, sdk) - if err != nil { - return nil, fmt.Errorf("failed to get admin identity: %w", err) + // Execute template + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return fmt.Errorf("failed to execute core.yaml template: %w", err) } - // Create a resource management client - sdkContext := sdk.Context( - fabsdk.WithIdentity(adminIdentity), - fabsdk.WithOrg(p.mspID), - ) - resClient, err := resmgmt.New(sdkContext) - if err != nil { - return nil, fmt.Errorf("failed to create resmgmt client: %w", err) + // Write core.yaml + if err := os.WriteFile(filepath.Join(mspConfigPath, "core.yaml"), buf.Bytes(), 0644); err != nil { + return fmt.Errorf("failed to write core.yaml: %w", err) } - var cbSignatures []*common.ConfigSignature - for _, sig := range signatures { - configSig := &common.ConfigSignature{} - if err := proto.Unmarshal(sig, configSig); err != nil { - return nil, fmt.Errorf("failed to unmarshal signature: %w", err) - } + // Stop the peer if it's running + if err := p.Stop(); err != nil { + return fmt.Errorf("failed to stop peer before regenerating config: %w", err) + } - cbSignatures = append(cbSignatures, configSig) + // Start the peer to regenerate config files + if _, err := p.Start(); err != nil { + return fmt.Errorf("failed to restart peer after regenerating config: %w", err) } - // Submit the config update to the orderer - configUpdateReader := bytes.NewReader(envelopeBytes) - chResponse, err := resClient.SaveChannel( - resmgmt.SaveChannelRequest{ - ChannelID: channelID, - ChannelConfig: configUpdateReader, - }, - resmgmt.WithConfigSignatures(cbSignatures...), - ) - if err != nil { - return nil, fmt.Errorf("failed to save channel config with signatures: %w", err) + return nil +} + +// Add this new function +func (p *LocalPeer) convertAddressOverrides(mspConfigPath string, overrides []types.AddressOverride) ([]AddressOverridePath, 
error) { + // Create temporary directory for override certificates + tmpDir := filepath.Join(mspConfigPath, "orderer-overrides") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, fmt.Errorf("failed to create orderer overrides directory: %w", err) } - return &SaveChannelConfigWithSignaturesResponse{ - TransactionID: string(chResponse.TransactionID), - }, nil + var convertedOverrides []AddressOverridePath + for i, override := range overrides { + // Write TLS CA certificate to file + certPath := filepath.Join(tmpDir, fmt.Sprintf("tlsca-%d.pem", i)) + if err := os.WriteFile(certPath, []byte(override.TLSCACert), 0644); err != nil { + return nil, fmt.Errorf("failed to write orderer TLS CA certificate: %w", err) + } + + // Add converted override + convertedOverrides = append(convertedOverrides, AddressOverridePath{ + From: override.From, + To: override.To, + TLSCAPath: certPath, + }) + } + + return convertedOverrides, nil } diff --git a/pkg/nodes/peer/service.go b/pkg/nodes/peer/service.go index d417c20..085147f 100644 --- a/pkg/nodes/peer/service.go +++ b/pkg/nodes/peer/service.go @@ -7,6 +7,7 @@ import ( "os/exec" "path/filepath" "runtime" + "strings" "text/template" ) @@ -60,7 +61,7 @@ After=network.target [Service] Type=simple WorkingDirectory={{.DirPath}} -ExecStart={{.Cmd}} +ExecStart=/bin/bash -c "{{.Cmd}} > {{.LogPath}} 2>&1" Restart=on-failure RestartSec=10 LimitNOFILE=65536 @@ -76,11 +77,13 @@ WantedBy=multi-user.target DirPath string Cmd string EnvVars []string + LogPath string }{ ID: p.opts.ID, DirPath: dirPath, Cmd: cmd, EnvVars: envStrings, + LogPath: p.GetStdOutPath(), } var buf bytes.Buffer @@ -102,6 +105,13 @@ func (p *LocalPeer) createLaunchdService(cmd string, env map[string]string, dirP envStrings = append(envStrings, fmt.Sprintf("%s\n %s", k, v)) } + // Escape special XML characters in cmd + cmd = strings.ReplaceAll(cmd, "&", "&") + cmd = strings.ReplaceAll(cmd, "<", "<") + cmd = strings.ReplaceAll(cmd, ">", ">") + cmd = 
strings.ReplaceAll(cmd, "'", "'") + cmd = strings.ReplaceAll(cmd, "\"", """) + tmpl := template.Must(template.New("launchd").Parse(` diff --git a/pkg/nodes/peer/types.go b/pkg/nodes/peer/types.go index 16a99d4..0c18d32 100644 --- a/pkg/nodes/peer/types.go +++ b/pkg/nodes/peer/types.go @@ -1,16 +1,19 @@ package peer +import "github.com/chainlaunch/chainlaunch/pkg/nodes/types" + // StartPeerOpts represents the options for starting a peer type StartPeerOpts struct { - ID string `json:"id"` - ListenAddress string `json:"listenAddress"` - ChaincodeAddress string `json:"chaincodeAddress"` - EventsAddress string `json:"eventsAddress"` - OperationsListenAddress string `json:"operationsListenAddress"` - ExternalEndpoint string `json:"externalEndpoint"` - DomainNames []string `json:"domainNames"` - Env map[string]string `json:"env"` - Version string `json:"version"` // Fabric version to use + ID string `json:"id"` + ListenAddress string `json:"listenAddress"` + ChaincodeAddress string `json:"chaincodeAddress"` + EventsAddress string `json:"eventsAddress"` + OperationsListenAddress string `json:"operationsListenAddress"` + ExternalEndpoint string `json:"externalEndpoint"` + DomainNames []string `json:"domainNames"` + Env map[string]string `json:"env"` + Version string `json:"version"` // Fabric version to use + AddressOverrides []types.AddressOverride `json:"addressOverrides,omitempty"` } // PeerConfig represents the configuration for a peer node @@ -42,3 +45,9 @@ type StartDockerResponse struct { Mode string `json:"mode"` ContainerName string `json:"containerName"` } + +type BlockInfo struct { + Height uint64 `json:"height"` + CurrentBlockHash string `json:"currentBlockHash"` + PreviousBlockHash string `json:"previousBlockHash"` +} diff --git a/pkg/nodes/service/config.go b/pkg/nodes/service/config.go index b8b8967..afebe15 100644 --- a/pkg/nodes/service/config.go +++ b/pkg/nodes/service/config.go @@ -13,6 +13,7 @@ type Node struct { BlockchainPlatform 
types.BlockchainPlatform `json:"platform"` NodeType types.NodeType `json:"nodeType"` Status types.NodeStatus `json:"status"` + ErrorMessage string `json:"errorMessage"` Endpoint string `json:"endpoint"` PublicEndpoint string `json:"publicEndpoint"` NodeConfig types.NodeConfig `json:"nodeConfig"` @@ -31,16 +32,17 @@ type PaginatedNodes struct { HasNextPage bool } -// NodeResponse represents a node with type-specific properties +// NodeResponse represents the response for node configuration type NodeResponse struct { - ID int64 `json:"id"` - Name string `json:"name"` - Platform string `json:"platform"` - Status string `json:"status"` - NodeType types.NodeType `json:"nodeType"` - Endpoint string `json:"endpoint"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` + ID int64 `json:"id"` + Name string `json:"name"` + Platform string `json:"platform"` + Status string `json:"status"` + ErrorMessage string `json:"errorMessage"` + NodeType types.NodeType `json:"nodeType"` + Endpoint string `json:"endpoint"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` // Type-specific fields FabricPeer *FabricPeerProperties `json:"fabricPeer,omitempty"` @@ -67,6 +69,9 @@ type FabricPeerProperties struct { TLSCert string `json:"tlsCert,omitempty"` SignCACert string `json:"signCaCert,omitempty"` TLSCACert string `json:"tlsCaCert,omitempty"` + + AddressOverrides []types.AddressOverride `json:"addressOverrides,omitempty"` + Version string `json:"version"` } // FabricOrdererProperties represents the properties specific to a Fabric orderer node @@ -87,6 +92,7 @@ type FabricOrdererProperties struct { TLSCert string `json:"tlsCert,omitempty"` SignCACert string `json:"signCaCert,omitempty"` TLSCACert string `json:"tlsCaCert,omitempty"` + Version string `json:"version"` } // BesuNodeProperties represents the properties specific to a Besu node @@ -98,8 +104,10 @@ type BesuNodeProperties struct { InternalIP string `json:"internalIp"` 
EnodeURL string `json:"enodeUrl"` // Add deployment config fields - P2PHost string `json:"p2pHost"` - RPCHost string `json:"rpcHost"` - KeyID int64 `json:"keyId"` - Mode string `json:"mode"` + P2PHost string `json:"p2pHost"` + RPCHost string `json:"rpcHost"` + KeyID int64 `json:"keyId"` + Mode string `json:"mode"` + Version string `json:"version"` + BootNodes []string `json:"bootNodes"` } diff --git a/pkg/nodes/service/defaults.go b/pkg/nodes/service/defaults.go index 1ae367d..5419a79 100644 --- a/pkg/nodes/service/defaults.go +++ b/pkg/nodes/service/defaults.go @@ -25,12 +25,14 @@ type NodeDefaults struct { // BesuNodeDefaults represents default values for a Besu node type BesuNodeDefaults struct { - P2PAddress string `json:"p2pAddress"` - RPCAddress string `json:"rpcAddress"` - NetworkID uint64 `json:"networkId"` - Mode Mode `json:"mode"` - ExternalIP string `json:"externalIP"` - InternalIP string `json:"internalIP"` + P2PHost string `json:"p2pHost"` + P2PPort uint `json:"p2pPort"` + RPCHost string `json:"rpcHost"` + RPCPort uint `json:"rpcPort"` + ExternalIP string `json:"externalIp"` + InternalIP string `json:"internalIp"` + Mode Mode `json:"mode"` + Env map[string]string `json:"environmentVariables"` } // NodesDefaultsParams represents parameters for getting multiple nodes defaults diff --git a/pkg/nodes/service/events.go b/pkg/nodes/service/events.go index 28b3f90..f0dc0e7 100644 --- a/pkg/nodes/service/events.go +++ b/pkg/nodes/service/events.go @@ -52,7 +52,7 @@ func (s *NodeEventService) CreateEvent(ctx context.Context, nodeID int64, eventT return fmt.Errorf("failed to marshal event data: %w", err) } - _, err = s.db.CreateNodeEvent(ctx, db.CreateNodeEventParams{ + _, err = s.db.CreateNodeEvent(ctx, &db.CreateNodeEventParams{ NodeID: nodeID, EventType: string(eventType), Data: sql.NullString{String: string(dataJSON), Valid: true}, @@ -69,7 +69,7 @@ func (s *NodeEventService) CreateEvent(ctx context.Context, nodeID int64, eventT // GetEvents retrieves a 
paginated list of node events func (s *NodeEventService) GetEvents(ctx context.Context, nodeID int64, page, limit int) ([]NodeEvent, error) { offset := (page - 1) * limit - dbEvents, err := s.db.ListNodeEvents(ctx, db.ListNodeEventsParams{ + dbEvents, err := s.db.ListNodeEvents(ctx, &db.ListNodeEventsParams{ NodeID: nodeID, Limit: int64(limit), Offset: int64(offset), @@ -91,7 +91,7 @@ func (s *NodeEventService) GetLatestEvent(ctx context.Context, nodeID int64) (*N return nil, fmt.Errorf("failed to get latest node event: %w", err) } - events := s.mapDBEventsToNodeEvents([]db.NodeEvent{dbEvent}) + events := s.mapDBEventsToNodeEvents([]*db.NodeEvent{dbEvent}) if len(events) == 0 { return nil, nil } @@ -101,7 +101,7 @@ func (s *NodeEventService) GetLatestEvent(ctx context.Context, nodeID int64) (*N // GetEventsByType retrieves events of a specific type for a node func (s *NodeEventService) GetEventsByType(ctx context.Context, nodeID int64, eventType NodeEventType, page, limit int) ([]NodeEvent, error) { offset := (page - 1) * limit - dbEvents, err := s.db.ListNodeEventsByType(ctx, db.ListNodeEventsByTypeParams{ + dbEvents, err := s.db.ListNodeEventsByType(ctx, &db.ListNodeEventsByTypeParams{ NodeID: nodeID, EventType: string(eventType), Limit: int64(limit), @@ -115,7 +115,7 @@ func (s *NodeEventService) GetEventsByType(ctx context.Context, nodeID int64, ev } // mapDBEventsToNodeEvents converts database events to service layer events -func (s *NodeEventService) mapDBEventsToNodeEvents(dbEvents []db.NodeEvent) []NodeEvent { +func (s *NodeEventService) mapDBEventsToNodeEvents(dbEvents []*db.NodeEvent) []NodeEvent { events := make([]NodeEvent, len(dbEvents)) for i, dbEvent := range dbEvents { // var data interface{} diff --git a/pkg/nodes/service/service.go b/pkg/nodes/service/service.go index 1118e1d..b0eb0d8 100644 --- a/pkg/nodes/service/service.go +++ b/pkg/nodes/service/service.go @@ -12,17 +12,21 @@ import ( "runtime" "strconv" "strings" + "time" + 
"github.com/chainlaunch/chainlaunch/pkg/config" "github.com/chainlaunch/chainlaunch/pkg/db" "github.com/chainlaunch/chainlaunch/pkg/errors" fabricservice "github.com/chainlaunch/chainlaunch/pkg/fabric/service" keymanagement "github.com/chainlaunch/chainlaunch/pkg/keymanagement/service" "github.com/chainlaunch/chainlaunch/pkg/logger" + networktypes "github.com/chainlaunch/chainlaunch/pkg/networks/service/types" "github.com/chainlaunch/chainlaunch/pkg/nodes/besu" "github.com/chainlaunch/chainlaunch/pkg/nodes/orderer" "github.com/chainlaunch/chainlaunch/pkg/nodes/peer" "github.com/chainlaunch/chainlaunch/pkg/nodes/types" "github.com/chainlaunch/chainlaunch/pkg/nodes/utils" + settingsservice "github.com/chainlaunch/chainlaunch/pkg/settings/service" ) // NodeService handles business logic for node management @@ -32,6 +36,8 @@ type NodeService struct { keymanagementService *keymanagement.KeyManagementService orgService *fabricservice.OrganizationService eventService *NodeEventService + configService *config.ConfigService + settingsService *settingsservice.SettingsService } // CreateNodeRequest represents the service-layer request to create a node @@ -51,6 +57,8 @@ func NewNodeService( keymanagementService *keymanagement.KeyManagementService, orgService *fabricservice.OrganizationService, eventService *NodeEventService, + configService *config.ConfigService, + settingsService *settingsservice.SettingsService, ) *NodeService { return &NodeService{ db: db, @@ -58,6 +66,8 @@ func NewNodeService( keymanagementService: keymanagementService, orgService: orgService, eventService: eventService, + configService: configService, + settingsService: settingsService, } } @@ -127,45 +137,39 @@ func (s *NodeService) validateAddress(address string) error { // validateFabricPeerAddresses validates all addresses used by a Fabric peer func (s *NodeService) validateFabricPeerAddresses(config *types.FabricPeerConfig) error { - // Validate listen address - if err := 
s.validateAddress(config.ListenAddress); err != nil { - return fmt.Errorf("invalid listen address: %w", err) - } - - // Validate chaincode address - if err := s.validateAddress(config.ChaincodeAddress); err != nil { - return fmt.Errorf("invalid chaincode address: %w", err) - } - - // Validate events address - if err := s.validateAddress(config.EventsAddress); err != nil { - return fmt.Errorf("invalid events address: %w", err) - } - - // Validate operations listen address - if err := s.validateAddress(config.OperationsListenAddress); err != nil { - return fmt.Errorf("invalid operations listen address: %w", err) - } - - // Check for port conflicts between addresses - addresses := map[string]string{ + // Get current addresses to compare against + currentAddresses := map[string]string{ "listen": config.ListenAddress, "chaincode": config.ChaincodeAddress, "events": config.EventsAddress, "operations": config.OperationsListenAddress, } + // Check for port conflicts between addresses usedPorts := make(map[string]string) - for addrType, addr := range addresses { + for addrType, addr := range currentAddresses { _, port, err := net.SplitHostPort(addr) if err != nil { return fmt.Errorf("invalid %s address format: %w", addrType, err) } if existingType, exists := usedPorts[port]; exists { + // If the port is already used by the same address type, it's okay + if existingType == addrType { + continue + } return fmt.Errorf("port conflict: %s and %s addresses use the same port %s", existingType, addrType, port) } usedPorts[port] = addrType + + // Only validate port availability if it's not already in use by this peer + if err := s.validateAddress(addr); err != nil { + // Check if the error is due to the port being in use by this peer + if strings.Contains(err.Error(), "address already in use") { + continue + } + return fmt.Errorf("invalid %s address: %w", addrType, err) + } } return nil @@ -237,7 +241,7 @@ func (s *NodeService) generateSlug(name string) string { // GetAllNodes 
retrieves all nodes without pagination func (s *NodeService) GetAllNodes(ctx context.Context) (*PaginatedNodes, error) { // Get all nodes from the database - dbNodes, err := s.db.ListNodes(ctx, db.ListNodesParams{ + dbNodes, err := s.db.ListNodes(ctx, &db.ListNodesParams{ Limit: 1000, // Use a high limit to get all nodes Offset: 0, }) @@ -329,7 +333,7 @@ func (s *NodeService) CreateNode(ctx context.Context, req CreateNodeRequest) (*N } // Create node in database - node, err := s.db.CreateNode(ctx, db.CreateNodeParams{ + node, err := s.db.CreateNode(ctx, &db.CreateNodeParams{ Name: req.Name, Slug: slug, Platform: string(req.BlockchainPlatform), @@ -346,7 +350,7 @@ func (s *NodeService) CreateNode(ctx context.Context, req CreateNodeRequest) (*N deploymentConfig, err := s.initializeNode(ctx, node, req) if err != nil { // Update node status to failed if initialization fails - s.updateNodeStatus(ctx, node.ID, types.NodeStatusError) + s.updateNodeStatusWithError(ctx, node.ID, types.NodeStatusError, fmt.Sprintf("Failed to initialize node: %v", err)) return nil, fmt.Errorf("failed to initialize node: %w", err) } @@ -357,7 +361,7 @@ func (s *NodeService) CreateNode(ctx context.Context, req CreateNodeRequest) (*N } // Update node with deployment config - node, err = s.db.UpdateNodeDeploymentConfig(ctx, db.UpdateNodeDeploymentConfigParams{ + node, err = s.db.UpdateNodeDeploymentConfig(ctx, &db.UpdateNodeDeploymentConfigParams{ ID: node.ID, DeploymentConfig: sql.NullString{String: string(deploymentConfigJSON), Valid: true}, }) @@ -442,7 +446,7 @@ func (s *NodeService) createNodeConfig(req CreateNodeRequest) (types.NodeConfig, } // initializeNode initializes a node and returns its deployment config -func (s *NodeService) initializeNode(ctx context.Context, dbNode db.Node, req CreateNodeRequest) (types.NodeDeploymentConfig, error) { +func (s *NodeService) initializeNode(ctx context.Context, dbNode *db.Node, req CreateNodeRequest) (types.NodeDeploymentConfig, error) { switch 
types.BlockchainPlatform(dbNode.Platform) { case types.PlatformFabric: if req.FabricPeer != nil { @@ -471,7 +475,7 @@ func (s *NodeService) initializeNode(ctx context.Context, dbNode db.Node, req Cr } // getPeerFromConfig creates a peer instance from the given configuration and database node -func (s *NodeService) getPeerFromConfig(dbNode db.Node, org *fabricservice.OrganizationDTO, config *types.FabricPeerConfig) *peer.LocalPeer { +func (s *NodeService) getPeerFromConfig(dbNode *db.Node, org *fabricservice.OrganizationDTO, config *types.FabricPeerConfig) *peer.LocalPeer { return peer.NewLocalPeer( org.MspID, s.db, @@ -485,6 +489,7 @@ func (s *NodeService) getPeerFromConfig(dbNode db.Node, org *fabricservice.Organ DomainNames: config.DomainNames, Env: config.Env, Version: config.Version, + AddressOverrides: config.AddressOverrides, }, config.Mode, org, @@ -493,11 +498,13 @@ func (s *NodeService) getPeerFromConfig(dbNode db.Node, org *fabricservice.Organ s.keymanagementService, dbNode.ID, s.logger, + s.configService, + s.settingsService, ) } // initializeFabricPeer initializes a Fabric peer node -func (s *NodeService) initializeFabricPeer(ctx context.Context, dbNode db.Node, req *types.FabricPeerConfig) (types.NodeDeploymentConfig, error) { +func (s *NodeService) initializeFabricPeer(ctx context.Context, dbNode *db.Node, req *types.FabricPeerConfig) (types.NodeDeploymentConfig, error) { org, err := s.orgService.GetOrganization(ctx, req.OrganizationID) if err != nil { return nil, fmt.Errorf("failed to get organization: %w", err) @@ -515,7 +522,7 @@ func (s *NodeService) initializeFabricPeer(ctx context.Context, dbNode db.Node, } // getOrdererFromConfig creates a LocalOrderer instance from configuration -func (s *NodeService) getOrdererFromConfig(dbNode db.Node, org *fabricservice.OrganizationDTO, config *types.FabricOrdererConfig) *orderer.LocalOrderer { +func (s *NodeService) getOrdererFromConfig(dbNode *db.Node, org *fabricservice.OrganizationDTO, config 
*types.FabricOrdererConfig) *orderer.LocalOrderer { return orderer.NewLocalOrderer( org.MspID, s.db, @@ -523,11 +530,12 @@ func (s *NodeService) getOrdererFromConfig(dbNode db.Node, org *fabricservice.Or ID: dbNode.Name, ListenAddress: config.ListenAddress, OperationsListenAddress: config.OperationsListenAddress, - AdminAddress: config.AdminAddress, + AdminListenAddress: config.AdminAddress, ExternalEndpoint: config.ExternalEndpoint, DomainNames: config.DomainNames, Env: config.Env, Version: config.Version, + AddressOverrides: config.AddressOverrides, }, config.Mode, org, @@ -536,11 +544,13 @@ func (s *NodeService) getOrdererFromConfig(dbNode db.Node, org *fabricservice.Or s.keymanagementService, dbNode.ID, s.logger, + s.configService, + s.settingsService, ) } // initializeFabricOrderer initializes a Fabric orderer node -func (s *NodeService) initializeFabricOrderer(ctx context.Context, dbNode db.Node, req *types.FabricOrdererConfig) (*types.FabricOrdererDeploymentConfig, error) { +func (s *NodeService) initializeFabricOrderer(ctx context.Context, dbNode *db.Node, req *types.FabricOrdererConfig) (*types.FabricOrdererDeploymentConfig, error) { org, err := s.orgService.GetOrganization(ctx, req.OrganizationID) if err != nil { return nil, fmt.Errorf("failed to get organization: %w", err) @@ -564,7 +574,7 @@ func (s *NodeService) initializeFabricOrderer(ctx context.Context, dbNode db.Nod } // initializeBesuNode initializes a Besu node -func (s *NodeService) initializeBesuNode(ctx context.Context, dbNode db.Node, config *types.BesuNodeConfig) (types.NodeDeploymentConfig, error) { +func (s *NodeService) initializeBesuNode(ctx context.Context, dbNode *db.Node, config *types.BesuNodeConfig) (types.NodeDeploymentConfig, error) { // Validate key exists key, err := s.keymanagementService.GetKey(ctx, int(config.KeyID)) if err != nil { @@ -602,7 +612,7 @@ func (s *NodeService) initializeBesuNode(ctx context.Context, dbNode db.Node, co // Update node endpoint endpoint := 
fmt.Sprintf("%s:%d", config.P2PHost, config.P2PPort) - _, err = s.db.UpdateNodeEndpoint(ctx, db.UpdateNodeEndpointParams{ + _, err = s.db.UpdateNodeEndpoint(ctx, &db.UpdateNodeEndpointParams{ ID: dbNode.ID, Endpoint: sql.NullString{ String: endpoint, @@ -616,7 +626,7 @@ func (s *NodeService) initializeBesuNode(ctx context.Context, dbNode db.Node, co // Update node public endpoint if external IP is set if config.ExternalIP != "" { publicEndpoint := fmt.Sprintf("%s:%d", config.ExternalIP, config.P2PPort) - _, err = s.db.UpdateNodePublicEndpoint(ctx, db.UpdateNodePublicEndpointParams{ + _, err = s.db.UpdateNodePublicEndpoint(ctx, &db.UpdateNodePublicEndpointParams{ ID: dbNode.ID, PublicEndpoint: sql.NullString{ String: publicEndpoint, @@ -649,35 +659,25 @@ func (s *NodeService) validatePort(host string, port int) error { // updateNodeStatus updates the status of a node in the database func (s *NodeService) updateNodeStatus(ctx context.Context, nodeID int64, status types.NodeStatus) error { - _, err := s.db.UpdateNodeStatus(ctx, db.UpdateNodeStatusParams{ + _, err := s.db.UpdateNodeStatus(ctx, &db.UpdateNodeStatusParams{ ID: nodeID, Status: string(status), }) if err != nil { return fmt.Errorf("failed to update node status: %w", err) } - dataBytes, err := json.Marshal(map[string]string{"status": string(status)}) - if err != nil { - return fmt.Errorf("failed to marshal node status: %w", err) - } - // Add node status change to event history - _, err = s.db.CreateNodeEvent(ctx, db.CreateNodeEventParams{ - NodeID: nodeID, - EventType: string(status), - Data: sql.NullString{String: string(dataBytes), Valid: true}, - Description: "status changed", - Status: string(status), + return nil +} + +func (s *NodeService) updateNodeStatusWithError(ctx context.Context, nodeID int64, status types.NodeStatus, errorMessage string) error { + _, err := s.db.UpdateNodeStatusWithError(ctx, &db.UpdateNodeStatusWithErrorParams{ + ID: nodeID, + Status: string(status), + ErrorMessage: 
sql.NullString{String: errorMessage, Valid: true}, }) if err != nil { - return fmt.Errorf("failed to create node event: %w", err) + return fmt.Errorf("failed to update node status with error: %w", err) } - - // Log the status change - s.logger.Info("Node status updated", - "nodeID", nodeID, - "status", status, - ) - return nil } @@ -698,7 +698,7 @@ func (s *NodeService) GetNode(ctx context.Context, id int64) (*NodeResponse, err // ListNodes retrieves a paginated list of nodes func (s *NodeService) ListNodes(ctx context.Context, platform *types.BlockchainPlatform, page, limit int) (*PaginatedNodes, error) { - var dbNodes []db.Node + var dbNodes []*db.Node var err error var total int64 @@ -706,7 +706,7 @@ func (s *NodeService) ListNodes(ctx context.Context, platform *types.BlockchainP if platform != nil { // Get nodes filtered by platform - dbNodes, err = s.db.ListNodesByPlatform(ctx, db.ListNodesByPlatformParams{ + dbNodes, err = s.db.ListNodesByPlatform(ctx, &db.ListNodesByPlatformParams{ Platform: string(*platform), Limit: int64(limit), Offset: int64(offset), @@ -717,7 +717,7 @@ func (s *NodeService) ListNodes(ctx context.Context, platform *types.BlockchainP total, err = s.db.CountNodesByPlatform(ctx, string(*platform)) } else { // Get all nodes - dbNodes, err = s.db.ListNodes(ctx, db.ListNodesParams{ + dbNodes, err = s.db.ListNodes(ctx, &db.ListNodesParams{ Limit: int64(limit), Offset: int64(offset), }) @@ -747,7 +747,7 @@ func (s *NodeService) ListNodes(ctx context.Context, platform *types.BlockchainP } // Update mapDBNodeToServiceNode to include deployment config and MSPID -func (s *NodeService) mapDBNodeToServiceNode(dbNode db.Node) (*Node, *NodeResponse) { +func (s *NodeService) mapDBNodeToServiceNode(dbNode *db.Node) (*Node, *NodeResponse) { ctx := context.Background() var nodeConfig types.NodeConfig var deploymentConfig types.NodeDeploymentConfig @@ -783,18 +783,20 @@ func (s *NodeService) mapDBNodeToServiceNode(dbNode db.Node) (*Node, *NodeRespon 
DeploymentConfig: deploymentConfig, CreatedAt: dbNode.CreatedAt, UpdatedAt: dbNode.UpdatedAt.Time, + ErrorMessage: dbNode.ErrorMessage.String, } // Create node response nodeResponse := &NodeResponse{ - ID: dbNode.ID, - Name: dbNode.Name, - Platform: dbNode.Platform, - Status: dbNode.Status, - NodeType: types.NodeType(dbNode.NodeType.String), - Endpoint: dbNode.Endpoint.String, - CreatedAt: dbNode.CreatedAt, - UpdatedAt: dbNode.UpdatedAt.Time, + ID: dbNode.ID, + Name: dbNode.Name, + Platform: dbNode.Platform, + Status: dbNode.Status, + NodeType: types.NodeType(dbNode.NodeType.String), + ErrorMessage: dbNode.ErrorMessage.String, + Endpoint: dbNode.Endpoint.String, + CreatedAt: dbNode.CreatedAt, + UpdatedAt: dbNode.UpdatedAt.Time, } // Add type-specific properties @@ -811,22 +813,25 @@ func (s *NodeService) mapDBNodeToServiceNode(dbNode db.Node) (*Node, *NodeRespon OperationsAddress: config.OperationsListenAddress, ListenAddress: config.ListenAddress, DomainNames: config.DomainNames, + Version: config.Version, } // Enrich with deployment config if available if peerDeployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig); ok { - nodeResponse.FabricPeer.ExternalEndpoint = peerDeployConfig.ExternalEndpoint - nodeResponse.FabricPeer.ListenAddress = peerDeployConfig.ListenAddress - nodeResponse.FabricPeer.ChaincodeAddress = peerDeployConfig.ChaincodeAddress - nodeResponse.FabricPeer.EventsAddress = peerDeployConfig.EventsAddress - nodeResponse.FabricPeer.OperationsAddress = peerDeployConfig.OperationsListenAddress + nodeResponse.FabricPeer.ExternalEndpoint = config.ExternalEndpoint + nodeResponse.FabricPeer.ListenAddress = config.ListenAddress + nodeResponse.FabricPeer.ChaincodeAddress = config.ChaincodeAddress + nodeResponse.FabricPeer.EventsAddress = config.EventsAddress + nodeResponse.FabricPeer.OperationsAddress = config.OperationsListenAddress nodeResponse.FabricPeer.TLSKeyID = peerDeployConfig.TLSKeyID nodeResponse.FabricPeer.SignKeyID = 
peerDeployConfig.SignKeyID - nodeResponse.FabricPeer.Mode = peerDeployConfig.Mode + nodeResponse.FabricPeer.Mode = config.Mode } // Add certificate information - peerConfig, ok := nodeConfig.(*types.FabricPeerConfig) + peerConfig, _ := nodeConfig.(*types.FabricPeerConfig) + peerDeployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig) if ok && peerConfig != nil { + nodeResponse.FabricPeer.AddressOverrides = peerDeployConfig.AddressOverrides // Get certificates from key service signKey, err := s.keymanagementService.GetKey(ctx, int(peerDeployConfig.SignKeyID)) if err == nil && signKey.Certificate != nil { @@ -869,16 +874,17 @@ func (s *NodeService) mapDBNodeToServiceNode(dbNode db.Node) (*Node, *NodeRespon OperationsAddress: config.OperationsListenAddress, ListenAddress: config.ListenAddress, DomainNames: config.DomainNames, + Version: config.Version, } // Enrich with deployment config if available if ordererDeployConfig, ok := deploymentConfig.(*types.FabricOrdererDeploymentConfig); ok { - nodeResponse.FabricOrderer.ExternalEndpoint = ordererDeployConfig.ExternalEndpoint - nodeResponse.FabricOrderer.ListenAddress = ordererDeployConfig.ListenAddress - nodeResponse.FabricOrderer.AdminAddress = ordererDeployConfig.AdminAddress - nodeResponse.FabricOrderer.OperationsAddress = ordererDeployConfig.OperationsListenAddress + nodeResponse.FabricOrderer.ExternalEndpoint = config.ExternalEndpoint + nodeResponse.FabricOrderer.ListenAddress = config.ListenAddress + nodeResponse.FabricOrderer.AdminAddress = config.AdminAddress + nodeResponse.FabricOrderer.OperationsAddress = config.OperationsListenAddress nodeResponse.FabricOrderer.TLSKeyID = ordererDeployConfig.TLSKeyID nodeResponse.FabricOrderer.SignKeyID = ordererDeployConfig.SignKeyID - nodeResponse.FabricOrderer.Mode = ordererDeployConfig.Mode + nodeResponse.FabricOrderer.Mode = config.Mode } // Add certificate information ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig) @@ -913,23 +919,23 @@ func 
(s *NodeService) mapDBNodeToServiceNode(dbNode db.Node) (*Node, *NodeRespon } } } - } - } - - if deploymentConfig != nil { - switch config := deploymentConfig.(type) { - case *types.BesuNodeDeploymentConfig: + case *types.BesuNodeConfig: nodeResponse.BesuNode = &BesuNodeProperties{ NetworkID: config.NetworkID, P2PPort: config.P2PPort, RPCPort: config.RPCPort, ExternalIP: config.ExternalIP, InternalIP: config.InternalIP, - EnodeURL: config.EnodeURL, P2PHost: config.P2PHost, RPCHost: config.RPCHost, KeyID: config.KeyID, Mode: config.Mode, + BootNodes: config.BootNodes, + } + deployConfig, ok := deploymentConfig.(*types.BesuNodeDeploymentConfig) + if ok { + nodeResponse.BesuNode.KeyID = deployConfig.KeyID + nodeResponse.BesuNode.EnodeURL = deployConfig.EnodeURL } } } @@ -978,7 +984,7 @@ func (s *NodeService) StopNode(ctx context.Context, id int64) (*NodeResponse, er if stopErr != nil { s.logger.Error("Failed to stop node", "error", stopErr) // Update status to error if stop failed - if err := s.updateNodeStatus(ctx, id, types.NodeStatusError); err != nil { + if err := s.updateNodeStatusWithError(ctx, id, types.NodeStatusError, fmt.Sprintf("Failed to stop node: %v", stopErr)); err != nil { s.logger.Error("Failed to update node status after stop error", "error", err) } return nil, fmt.Errorf("failed to stop node: %w", stopErr) @@ -994,7 +1000,7 @@ func (s *NodeService) StopNode(ctx context.Context, id int64) (*NodeResponse, er } // startNode starts a node based on its type and configuration -func (s *NodeService) startNode(ctx context.Context, dbNode db.Node) error { +func (s *NodeService) startNode(ctx context.Context, dbNode *db.Node) error { // Update status to starting if err := s.updateNodeStatus(ctx, dbNode.ID, types.NodeStatusStarting); err != nil { return fmt.Errorf("failed to update node status: %w", err) @@ -1013,8 +1019,9 @@ func (s *NodeService) startNode(ctx context.Context, dbNode db.Node) error { } if startErr != nil { + s.logger.Error("Failed to start 
node", "error", startErr) // Update status to error if start failed - if err := s.updateNodeStatus(ctx, dbNode.ID, types.NodeStatusError); err != nil { + if err := s.updateNodeStatusWithError(ctx, dbNode.ID, types.NodeStatusError, fmt.Sprintf("Failed to start node: %v", startErr)); err != nil { s.logger.Error("Failed to update node status after start error", "error", err) } return fmt.Errorf("failed to start node: %w", startErr) @@ -1029,7 +1036,7 @@ func (s *NodeService) startNode(ctx context.Context, dbNode db.Node) error { } // startFabricPeer starts a Fabric peer node -func (s *NodeService) startFabricPeer(ctx context.Context, dbNode db.Node) error { +func (s *NodeService) startFabricPeer(ctx context.Context, dbNode *db.Node) error { nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) if err != nil { @@ -1064,7 +1071,7 @@ func (s *NodeService) startFabricPeer(ctx context.Context, dbNode db.Node) error } // stopFabricPeer stops a Fabric peer node -func (s *NodeService) stopFabricPeer(ctx context.Context, dbNode db.Node) error { +func (s *NodeService) stopFabricPeer(ctx context.Context, dbNode *db.Node) error { deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.NodeConfig.String) if err != nil { return fmt.Errorf("failed to deserialize deployment config: %w", err) @@ -1096,7 +1103,7 @@ func (s *NodeService) stopFabricPeer(ctx context.Context, dbNode db.Node) error } // startFabricOrderer starts a Fabric orderer node -func (s *NodeService) startFabricOrderer(ctx context.Context, dbNode db.Node) error { +func (s *NodeService) startFabricOrderer(ctx context.Context, dbNode *db.Node) error { nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) if err != nil { return fmt.Errorf("failed to deserialize node config: %w", err) @@ -1122,7 +1129,7 @@ func (s *NodeService) startFabricOrderer(ctx context.Context, dbNode db.Node) er } // stopFabricOrderer stops a Fabric orderer node -func (s *NodeService) 
stopFabricOrderer(ctx context.Context, dbNode db.Node) error { +func (s *NodeService) stopFabricOrderer(ctx context.Context, dbNode *db.Node) error { nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) if err != nil { return fmt.Errorf("failed to deserialize node config: %w", err) @@ -1147,7 +1154,7 @@ func (s *NodeService) stopFabricOrderer(ctx context.Context, dbNode db.Node) err return nil } -func (s *NodeService) getBesuFromConfig(ctx context.Context, dbNode db.Node, config *types.BesuNodeConfig, deployConfig *types.BesuNodeDeploymentConfig) (*besu.LocalBesu, error) { +func (s *NodeService) getBesuFromConfig(ctx context.Context, dbNode *db.Node, config *types.BesuNodeConfig, deployConfig *types.BesuNodeDeploymentConfig) (*besu.LocalBesu, error) { network, err := s.db.GetNetwork(ctx, deployConfig.NetworkID) if err != nil { return nil, fmt.Errorf("failed to get network: %w", err) @@ -1160,6 +1167,11 @@ func (s *NodeService) getBesuFromConfig(ctx context.Context, dbNode db.Node, con if err != nil { return nil, fmt.Errorf("failed to decrypt key: %w", err) } + var networkConfig networktypes.BesuNetworkConfig + if err := json.Unmarshal([]byte(network.Config.String), &networkConfig); err != nil { + return nil, fmt.Errorf("failed to unmarshal network config: %w", err) + } + localBesu := besu.NewLocalBesu( besu.StartBesuOpts{ ID: dbNode.Slug, @@ -1171,20 +1183,25 @@ func (s *NodeService) getBesuFromConfig(ctx context.Context, dbNode db.Node, con MinerAddress: key.EthereumAddress, ConsensusType: "qbft", // TODO: get consensus type from network BootNodes: config.BootNodes, - Version: "25.2.0", // TODO: get version from network + Version: "25.4.1", // TODO: get version from network NodePrivateKey: strings.TrimPrefix(privateKeyDecrypted, "0x"), Env: config.Env, + P2PHost: config.P2PHost, + RPCHost: config.RPCHost, }, string(config.Mode), dbNode.ID, s.logger, + s.configService, + s.settingsService, + networkConfig, ) return localBesu, nil } // 
stopBesuNode stops a Besu node -func (s *NodeService) stopBesuNode(ctx context.Context, dbNode db.Node) error { +func (s *NodeService) stopBesuNode(ctx context.Context, dbNode *db.Node) error { // Load node configuration nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) if err != nil { @@ -1221,7 +1238,7 @@ func (s *NodeService) stopBesuNode(ctx context.Context, dbNode db.Node) error { } // startBesuNode starts a Besu node -func (s *NodeService) startBesuNode(ctx context.Context, dbNode db.Node) error { +func (s *NodeService) startBesuNode(ctx context.Context, dbNode *db.Node) error { // Load node configuration nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) if err != nil { @@ -1255,6 +1272,10 @@ func (s *NodeService) startBesuNode(ctx context.Context, dbNode db.Node) error { if err != nil { return fmt.Errorf("failed to decrypt key: %w", err) } + var networkConfig networktypes.BesuNetworkConfig + if err := json.Unmarshal([]byte(network.Config.String), &networkConfig); err != nil { + return fmt.Errorf("failed to unmarshal network config: %w", err) + } // Create LocalBesu instance localBesu := besu.NewLocalBesu( @@ -1262,19 +1283,25 @@ func (s *NodeService) startBesuNode(ctx context.Context, dbNode db.Node) error { ID: dbNode.Slug, GenesisFile: network.GenesisBlockB64.String, NetworkID: besuDeployConfig.NetworkID, + ChainID: networkConfig.ChainID, P2PPort: fmt.Sprintf("%d", besuDeployConfig.P2PPort), RPCPort: fmt.Sprintf("%d", besuDeployConfig.RPCPort), ListenAddress: besuDeployConfig.P2PHost, MinerAddress: key.EthereumAddress, ConsensusType: "qbft", // TODO: get consensus type from network BootNodes: besuNodeConfig.BootNodes, - Version: "25.2.0", // TODO: get version from network + Version: "25.4.1", // TODO: get version from network NodePrivateKey: strings.TrimPrefix(privateKeyDecrypted, "0x"), Env: besuNodeConfig.Env, + P2PHost: besuNodeConfig.P2PHost, + RPCHost: besuNodeConfig.RPCHost, }, 
string(besuNodeConfig.Mode), dbNode.ID, s.logger, + s.configService, + s.settingsService, + networkConfig, ) // Start the node @@ -1341,18 +1368,12 @@ func (s *NodeService) DeleteNode(ctx context.Context, id int64) error { } // cleanupPeerResources cleans up resources specific to a Fabric peer node -func (s *NodeService) cleanupPeerResources(ctx context.Context, node db.Node) error { - // Get the home directory - homeDir, err := os.UserHomeDir() - if err != nil { - return fmt.Errorf("failed to get home directory: %w", err) - } - +func (s *NodeService) cleanupPeerResources(ctx context.Context, node *db.Node) error { // Clean up peer-specific directories dirsToClean := []string{ - filepath.Join(homeDir, ".chainlaunch", "nodes", node.Slug), - filepath.Join(homeDir, ".chainlaunch", "peers", node.Slug), - filepath.Join(homeDir, ".chainlaunch", "fabric", "peers", node.Slug), + filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug), + filepath.Join(s.configService.GetDataPath(), "peers", node.Slug), + filepath.Join(s.configService.GetDataPath(), "fabric", "peers", node.Slug), } for _, dir := range dirsToClean { @@ -1372,18 +1393,13 @@ func (s *NodeService) cleanupPeerResources(ctx context.Context, node db.Node) er } // cleanupOrdererResources cleans up resources specific to a Fabric orderer node -func (s *NodeService) cleanupOrdererResources(ctx context.Context, node db.Node) error { - // Get the home directory - homeDir, err := os.UserHomeDir() - if err != nil { - return fmt.Errorf("failed to get home directory: %w", err) - } +func (s *NodeService) cleanupOrdererResources(ctx context.Context, node *db.Node) error { // Clean up orderer-specific directories dirsToClean := []string{ - filepath.Join(homeDir, ".chainlaunch", "nodes", node.Slug), - filepath.Join(homeDir, ".chainlaunch", "orderers", node.Slug), - filepath.Join(homeDir, ".chainlaunch", "fabric", "orderers", node.Slug), + filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug), + 
filepath.Join(s.configService.GetDataPath(), "orderers", node.Slug), + filepath.Join(s.configService.GetDataPath(), "fabric", "orderers", node.Slug), } for _, dir := range dirsToClean { @@ -1403,12 +1419,7 @@ func (s *NodeService) cleanupOrdererResources(ctx context.Context, node db.Node) } // cleanupBesuResources cleans up resources specific to a Besu node -func (s *NodeService) cleanupBesuResources(ctx context.Context, node db.Node) error { - // Get the home directory - homeDir, err := os.UserHomeDir() - if err != nil { - return fmt.Errorf("failed to get home directory: %w", err) - } +func (s *NodeService) cleanupBesuResources(ctx context.Context, node *db.Node) error { // Load node configuration nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String)) @@ -1453,9 +1464,9 @@ func (s *NodeService) cleanupBesuResources(ctx context.Context, node db.Node) er // Clean up Besu-specific directories dirsToClean := []string{ - filepath.Join(homeDir, ".chainlaunch", "nodes", node.Slug), - filepath.Join(homeDir, ".chainlaunch", "besu", node.Slug), - filepath.Join(homeDir, ".chainlaunch", "besu", "nodes", node.Slug), + filepath.Join(s.configService.GetDataPath(), "nodes", node.Slug), + filepath.Join(s.configService.GetDataPath(), "besu", node.Slug), + filepath.Join(s.configService.GetDataPath(), "besu", "nodes", node.Slug), } for _, dir := range dirsToClean { @@ -1486,8 +1497,12 @@ func (s *NodeService) cleanupBesuResources(ctx context.Context, node db.Node) er case "darwin": // Remove launchd plist file + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("failed to get home directory: %w", err) + } if localBesu != nil { - plistFile := filepath.Join(homeDir, "Library/LaunchAgents", fmt.Sprintf("ai.chainlaunch.besu.%s.plist", node.Slug)) + plistFile := filepath.Join(homeDir, "Library/LaunchAgents", fmt.Sprintf("dev.chainlaunch.besu.%s.plist", node.Slug)) if err := os.Remove(plistFile); err != nil { if !os.IsNotExist(err) { 
s.logger.Warn("Failed to remove launchd plist file", "error", err) @@ -1497,7 +1512,7 @@ func (s *NodeService) cleanupBesuResources(ctx context.Context, node db.Node) er } // Clean up any data directories - dataDir := filepath.Join(homeDir, ".chainlaunch", "data", "besu", node.Slug) + dataDir := filepath.Join(s.configService.GetDataPath(), "data", "besu", node.Slug) if err := os.RemoveAll(dataDir); err != nil { if !os.IsNotExist(err) { s.logger.Warn("Failed to remove Besu data directory", @@ -1513,7 +1528,7 @@ func (s *NodeService) cleanupBesuResources(ctx context.Context, node db.Node) er } // Update cleanupNodeResources to use the new function -func (s *NodeService) cleanupNodeResources(ctx context.Context, node db.Node) error { +func (s *NodeService) cleanupNodeResources(ctx context.Context, node *db.Node) error { // Get the home directory homeDir, err := os.UserHomeDir() if err != nil { @@ -1538,7 +1553,7 @@ func (s *NodeService) cleanupNodeResources(ctx context.Context, node db.Node) er case "darwin": // Remove launchd plist file - plistFile := filepath.Join(homeDir, "Library/LaunchAgents", fmt.Sprintf("ai.chainlaunch.%s.plist", deploymentConfig.GetServiceName())) + plistFile := filepath.Join(homeDir, "Library/LaunchAgents", fmt.Sprintf("dev.chainlaunch.%s.plist", deploymentConfig.GetServiceName())) if err := os.Remove(plistFile); err != nil { if !os.IsNotExist(err) { s.logger.Warn("Failed to remove launchd plist file", "error", err) @@ -1652,8 +1667,8 @@ const ( maxPortAttempts = 100 // Maximum attempts to find available ports ) -// GetNodesDefaults returns default values for multiple nodes with guaranteed non-overlapping ports -func (s *NodeService) GetNodesDefaults(params NodesDefaultsParams) (*NodesDefaultsResult, error) { +// GetFabricNodesDefaults returns default values for multiple nodes with guaranteed non-overlapping ports +func (s *NodeService) GetFabricNodesDefaults(params NodesDefaultsParams) (*NodesDefaultsResult, error) { // Validate node counts 
if params.PeerCount > 15 { return nil, fmt.Errorf("peer count exceeds maximum supported nodes (15)") @@ -1736,6 +1751,81 @@ func (s *NodeService) GetNodesDefaults(params NodesDefaultsParams) (*NodesDefaul return result, nil } +func (s *NodeService) GetNodeLogPath(ctx context.Context, node *NodeResponse) (string, error) { + dbNode, err := s.db.GetNode(ctx, node.ID) + if err != nil { + return "", fmt.Errorf("failed to get node: %w", err) + } + + // Get deployment config + deploymentConfig, err := utils.DeserializeDeploymentConfig(dbNode.DeploymentConfig.String) + if err != nil { + return "", fmt.Errorf("failed to deserialize deployment config: %w", err) + } + + switch types.NodeType(dbNode.NodeType.String) { + case types.NodeTypeFabricPeer: + nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) + if err != nil { + return "", fmt.Errorf("failed to deserialize node config: %w", err) + } + peerNodeConfig, ok := nodeConfig.(*types.FabricPeerConfig) + if !ok { + return "", fmt.Errorf("failed to assert node config to FabricPeerConfig") + } + s.logger.Debug("Peer config", "config", peerNodeConfig, "deploymentConfig", deploymentConfig) + // Get organization + org, err := s.orgService.GetOrganization(ctx, peerNodeConfig.OrganizationID) + if err != nil { + return "", fmt.Errorf("failed to get organization: %w", err) + } + + // Create peer instance + localPeer := s.getPeerFromConfig(dbNode, org, peerNodeConfig) + + // Tail logs from peer + return localPeer.GetStdOutPath(), nil + case types.NodeTypeFabricOrderer: + // Convert to FabricOrdererDeploymentConfig + nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) + if err != nil { + return "", fmt.Errorf("failed to deserialize node config: %w", err) + } + ordererNodeConfig, ok := nodeConfig.(*types.FabricOrdererConfig) + if !ok { + return "", fmt.Errorf("failed to assert node config to FabricOrdererConfig") + } + s.logger.Info("Orderer config", "config", ordererNodeConfig, 
"deploymentConfig", deploymentConfig) + // Get organization + org, err := s.orgService.GetOrganization(ctx, ordererNodeConfig.OrganizationID) + if err != nil { + return "", fmt.Errorf("failed to get organization: %w", err) + } + // Create orderer instance + localOrderer := s.getOrdererFromConfig(dbNode, org, ordererNodeConfig) + // Tail logs from orderer + return localOrderer.GetStdOutPath(), nil + case types.NodeTypeBesuFullnode: + nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) + if err != nil { + return "", fmt.Errorf("failed to deserialize node config: %w", err) + } + besuNodeConfig, ok := nodeConfig.(*types.BesuNodeConfig) + if !ok { + return "", fmt.Errorf("failed to assert node config to BesuNodeConfig") + } + besuDeployConfig := deploymentConfig.ToBesuNodeConfig() + + localBesu, err := s.getBesuFromConfig(ctx, dbNode, besuNodeConfig, besuDeployConfig) + if err != nil { + return "", fmt.Errorf("failed to get besu from config: %w", err) + } + return localBesu.GetStdOutPath(), nil + default: + return "", fmt.Errorf("unsupported node type for log tailing: %s", dbNode.NodeType.String) + } +} + // TailLogs returns a channel that receives log lines from the specified node func (s *NodeService) TailLogs(ctx context.Context, nodeID int64, tail int, follow bool) (<-chan string, error) { // Get the node first to verify it exists @@ -1984,28 +2074,69 @@ func GetBesuPorts(baseP2PPort, baseRPCPort uint) (p2pPort uint, rpcPort uint, er return p2pPort, rpcPort, nil } -// GetBesuNodeDefaults returns the default configuration for a Besu node -func (s *NodeService) GetBesuNodeDefaults() (*BesuNodeDefaults, error) { - // Try to get available ports starting from default Besu ports - p2pPort, rpcPort, err := GetBesuPorts(30303, 8545) +// GetBesuNodeDefaults returns the default configuration for Besu nodes +func (s *NodeService) GetBesuNodeDefaults(besuNodes int) ([]BesuNodeDefaults, error) { + // Validate node count + if besuNodes <= 0 { + besuNodes = 1 
+ } + if besuNodes > 15 { + return nil, fmt.Errorf("besu node count exceeds maximum supported nodes (15)") + } + + // Get external IP for p2p communication + externalIP, err := s.GetExternalIP() if err != nil { - // If we can't get the preferred ports, try from a higher range - p2pPort, rpcPort, err = GetBesuPorts(40303, 18545) + return nil, fmt.Errorf("failed to get external IP: %w", err) + } + + // Use localhost for internal IP + internalIP := "127.0.0.1" + + // Base ports for Besu nodes with sufficient spacing + const ( + baseP2PPort = 30303 // Starting P2P port + baseRPCPort = 8545 // Starting RPC port + portOffset = 100 // Each node gets a 100 port range + ) + + // Create array to hold all node defaults + nodeDefaults := make([]BesuNodeDefaults, besuNodes) + + // Generate defaults for each node + for i := 0; i < besuNodes; i++ { + // Try to get ports for each node + p2pPort, rpcPort, err := GetBesuPorts( + uint(baseP2PPort+(i*portOffset)), + uint(baseRPCPort+(i*portOffset)), + ) if err != nil { - return nil, fmt.Errorf("failed to find available ports: %w", err) + // If we can't get the preferred ports, try from a higher range + p2pPort, rpcPort, err = GetBesuPorts( + uint(40303+(i*portOffset)), + uint(18545+(i*portOffset)), + ) + if err != nil { + return nil, fmt.Errorf("failed to find available ports for node %d: %w", i+1, err) + } + } + + // Create node defaults with unique ports + nodeDefaults[i] = BesuNodeDefaults{ + P2PHost: externalIP, // Use external IP for p2p host + P2PPort: p2pPort, + RPCHost: "0.0.0.0", // Allow RPC from any interface + RPCPort: rpcPort, + ExternalIP: externalIP, + InternalIP: internalIP, + Mode: ModeService, + Env: map[string]string{ + "JAVA_OPTS": "-Xmx4g", + }, } } - externalIP := "127.0.0.1" - internalIP := "127.0.0.1" - return &BesuNodeDefaults{ - P2PAddress: fmt.Sprintf("%s:%d", externalIP, p2pPort), - RPCAddress: fmt.Sprintf("%s:%d", externalIP, rpcPort), - NetworkID: 1337, // Default private network ID - Mode: ModeService, - 
ExternalIP: externalIP, - InternalIP: internalIP, - }, nil + return nodeDefaults, nil } // Add a method to get full node details when needed @@ -2023,3 +2154,770 @@ func (s *NodeService) GetNodeWithConfig(ctx context.Context, id int64) (*Node, e func (s *NodeService) GetNodeForDeployment(ctx context.Context, id int64) (*Node, error) { return s.GetNodeWithConfig(ctx, id) } + +// Channel represents a Fabric channel +type Channel struct { + Name string `json:"name"` + BlockNum int64 `json:"blockNum"` + CreatedAt time.Time `json:"createdAt"` +} + +// GetNodeChannels retrieves the list of channels for a Fabric node +func (s *NodeService) GetNodeChannels(ctx context.Context, id int64) ([]Channel, error) { + // Get the node first + node, err := s.db.GetNode(ctx, id) + if err != nil { + if err == sql.ErrNoRows { + return nil, errors.NewNotFoundError("node not found", nil) + } + return nil, fmt.Errorf("failed to get node: %w", err) + } + + // Verify node type + nodeType := types.NodeType(node.NodeType.String) + if nodeType != types.NodeTypeFabricPeer && nodeType != types.NodeTypeFabricOrderer { + return nil, errors.NewValidationError("node is not a Fabric node", nil) + } + + switch nodeType { + case types.NodeTypeFabricPeer: + // Get peer instance + peer, err := s.GetFabricPeer(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to get peer: %w", err) + } + peerChannels, err := peer.GetChannels(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get peer channels: %w", err) + } + channels := make([]Channel, len(peerChannels)) + for i, channel := range peerChannels { + channels[i] = Channel{ + Name: channel.Name, + BlockNum: channel.BlockNum, + CreatedAt: channel.CreatedAt, + } + } + return channels, nil + + case types.NodeTypeFabricOrderer: + // Get orderer instance + orderer, err := s.GetFabricOrderer(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to get orderer: %w", err) + } + ordererChannels, err := orderer.GetChannels(ctx) + if err != nil { 
+ return nil, fmt.Errorf("failed to get orderer channels: %w", err) + } + channels := make([]Channel, len(ordererChannels)) + for i, channel := range ordererChannels { + channels[i] = Channel{ + Name: channel.Name, + BlockNum: channel.BlockNum, + CreatedAt: channel.CreatedAt, + } + } + return channels, nil + } + + return nil, fmt.Errorf("unsupported node type: %s", nodeType) +} + +// RenewCertificates renews the certificates for a node +func (s *NodeService) RenewCertificates(ctx context.Context, id int64) (*NodeResponse, error) { + // Get the node from database + node, err := s.db.GetNode(ctx, id) + if err != nil { + if err == sql.ErrNoRows { + return nil, errors.NewNotFoundError("node not found", nil) + } + return nil, fmt.Errorf("failed to get node: %w", err) + } + + // Update status to indicate certificate renewal is in progress + if err := s.updateNodeStatus(ctx, id, types.NodeStatusUpdating); err != nil { + return nil, fmt.Errorf("failed to update node status: %w", err) + } + + // Get deployment config + deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String) + if err != nil { + return nil, fmt.Errorf("failed to deserialize deployment config: %w", err) + } + + var renewErr error + switch types.NodeType(node.NodeType.String) { + case types.NodeTypeFabricPeer: + renewErr = s.renewPeerCertificates(ctx, node, deploymentConfig) + case types.NodeTypeFabricOrderer: + renewErr = s.renewOrdererCertificates(ctx, node, deploymentConfig) + default: + renewErr = fmt.Errorf("certificate renewal not supported for node type: %s", node.NodeType.String) + } + + if renewErr != nil { + // Update status to error if renewal failed + if err := s.updateNodeStatusWithError(ctx, id, types.NodeStatusError, fmt.Sprintf("Failed to renew certificates: %v", renewErr)); err != nil { + s.logger.Error("Failed to update node status after renewal error", "error", err) + } + return nil, fmt.Errorf("failed to renew certificates: %w", renewErr) + } + + // Update 
status to running after successful renewal + if err := s.updateNodeStatus(ctx, id, types.NodeStatusRunning); err != nil { + return nil, fmt.Errorf("failed to update node status: %w", err) + } + + // Get updated node + updatedNode, err := s.GetNode(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to get updated node: %w", err) + } + + return updatedNode, nil +} + +// renewPeerCertificates handles certificate renewal for a Fabric peer +func (s *NodeService) renewPeerCertificates(ctx context.Context, dbNode *db.Node, deploymentConfig types.NodeDeploymentConfig) error { + nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) + if err != nil { + return fmt.Errorf("failed to load node config: %w", err) + } + + peerConfig, ok := nodeConfig.(*types.FabricPeerConfig) + if !ok { + return fmt.Errorf("invalid peer config type") + } + + peerDeployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig) + if !ok { + return fmt.Errorf("invalid peer deployment config type") + } + + org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID) + if err != nil { + return fmt.Errorf("failed to get organization: %w", err) + } + + localPeer := s.getPeerFromConfig(dbNode, org, peerConfig) + err = localPeer.RenewCertificates(peerDeployConfig) + if err != nil { + return fmt.Errorf("failed to renew peer certificates: %w", err) + } + + return nil +} + +// renewOrdererCertificates handles certificate renewal for a Fabric orderer +func (s *NodeService) renewOrdererCertificates(ctx context.Context, dbNode *db.Node, deploymentConfig types.NodeDeploymentConfig) error { + nodeConfig, err := utils.LoadNodeConfig([]byte(dbNode.NodeConfig.String)) + if err != nil { + return fmt.Errorf("failed to load node config: %w", err) + } + + ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig) + if !ok { + return fmt.Errorf("invalid orderer config type") + } + + ordererDeployConfig, ok := deploymentConfig.(*types.FabricOrdererDeploymentConfig) + if 
!ok { + return fmt.Errorf("invalid orderer deployment config type") + } + + org, err := s.orgService.GetOrganization(ctx, ordererConfig.OrganizationID) + if err != nil { + return fmt.Errorf("failed to get organization: %w", err) + } + + localOrderer := s.getOrdererFromConfig(dbNode, org, ordererConfig) + err = localOrderer.RenewCertificates(ordererDeployConfig) + if err != nil { + return fmt.Errorf("failed to renew orderer certificates: %w", err) + } + + return nil +} + +// UpdateNodeEnvironment updates the environment variables for a node +func (s *NodeService) UpdateNodeEnvironment(ctx context.Context, nodeID int64, req *types.UpdateNodeEnvRequest) (*types.UpdateNodeEnvResponse, error) { + // Get the node from the database + dbNode, err := s.db.GetNode(ctx, nodeID) + if err != nil { + return nil, fmt.Errorf("failed to get node: %w", err) + } + + // Get the node's current configuration + switch dbNode.NodeType.String { + case string(types.NodeTypeFabricPeer): + var peerConfig types.FabricPeerConfig + if err := json.Unmarshal([]byte(dbNode.Config.String), &peerConfig); err != nil { + return nil, fmt.Errorf("failed to unmarshal peer config: %w", err) + } + peerConfig.Env = req.Env + newConfig, err := json.Marshal(peerConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal updated peer config: %w", err) + } + if _, err := s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{ + ID: nodeID, + NodeConfig: sql.NullString{String: string(newConfig), Valid: true}, + }); err != nil { + return nil, fmt.Errorf("failed to update node config: %w", err) + } + + case string(types.NodeTypeFabricOrderer): + var ordererConfig types.FabricOrdererConfig + if err := json.Unmarshal([]byte(dbNode.Config.String), &ordererConfig); err != nil { + return nil, fmt.Errorf("failed to unmarshal orderer config: %w", err) + } + ordererConfig.Env = req.Env + newConfig, err := json.Marshal(ordererConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal updated orderer 
config: %w", err) + } + if _, err := s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{ + ID: nodeID, + NodeConfig: sql.NullString{String: string(newConfig), Valid: true}, + }); err != nil { + return nil, fmt.Errorf("failed to update node config: %w", err) + } + + default: + return nil, fmt.Errorf("unsupported node type: %s", dbNode.NodeType.String) + } + + // Return the updated environment variables and indicate that a restart is required + return &types.UpdateNodeEnvResponse{ + Env: req.Env, + RequiresRestart: true, + }, nil +} + +// GetNodeEnvironment retrieves the current environment variables for a node +func (s *NodeService) GetNodeEnvironment(ctx context.Context, nodeID int64) (map[string]string, error) { + // Get the node from the database + dbNode, err := s.db.GetNode(ctx, nodeID) + if err != nil { + return nil, fmt.Errorf("failed to get node: %w", err) + } + + // Get the node's current configuration + switch dbNode.NodeType.String { + case string(types.NodeTypeFabricPeer): + var peerConfig types.FabricPeerConfig + if err := json.Unmarshal([]byte(dbNode.Config.String), &peerConfig); err != nil { + return nil, fmt.Errorf("failed to unmarshal peer config: %w", err) + } + return peerConfig.Env, nil + + case string(types.NodeTypeFabricOrderer): + var ordererConfig types.FabricOrdererConfig + if err := json.Unmarshal([]byte(dbNode.Config.String), &ordererConfig); err != nil { + return nil, fmt.Errorf("failed to unmarshal orderer config: %w", err) + } + return ordererConfig.Env, nil + + default: + return nil, fmt.Errorf("unsupported node type: %s", dbNode.NodeType.String) + } +} + +// UpdateFabricPeer updates a Fabric peer node configuration +func (s *NodeService) UpdateFabricPeer(ctx context.Context, opts UpdateFabricPeerOpts) (*NodeResponse, error) { + // Get the node from database + node, err := s.db.GetNode(ctx, opts.NodeID) + if err != nil { + if err == sql.ErrNoRows { + return nil, errors.NewNotFoundError("peer node not found", nil) + } + return nil, 
fmt.Errorf("failed to get peer node: %w", err) + } + + // Verify node type + if types.NodeType(node.NodeType.String) != types.NodeTypeFabricPeer { + return nil, fmt.Errorf("node %d is not a Fabric peer", opts.NodeID) + } + + // Load current config + nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String)) + if err != nil { + return nil, fmt.Errorf("failed to load peer config: %w", err) + } + + peerConfig, ok := nodeConfig.(*types.FabricPeerConfig) + if !ok { + return nil, fmt.Errorf("invalid peer config type") + } + + deployConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String) + if err != nil { + return nil, fmt.Errorf("failed to deserialize deployment config: %w", err) + } + deployPeerConfig, ok := deployConfig.(*types.FabricPeerDeploymentConfig) + if !ok { + return nil, fmt.Errorf("invalid deployment config type") + } + + // Update configuration fields if provided + if opts.ExternalEndpoint != "" && opts.ExternalEndpoint != peerConfig.ExternalEndpoint { + peerConfig.ExternalEndpoint = opts.ExternalEndpoint + } + if opts.ListenAddress != "" && opts.ListenAddress != peerConfig.ListenAddress { + if err := s.validateAddress(opts.ListenAddress); err != nil { + return nil, fmt.Errorf("invalid listen address: %w", err) + } + peerConfig.ListenAddress = opts.ListenAddress + } + if opts.EventsAddress != "" && opts.EventsAddress != peerConfig.EventsAddress { + if err := s.validateAddress(opts.EventsAddress); err != nil { + return nil, fmt.Errorf("invalid events address: %w", err) + } + peerConfig.EventsAddress = opts.EventsAddress + } + if opts.OperationsListenAddress != "" && opts.OperationsListenAddress != peerConfig.OperationsListenAddress { + if err := s.validateAddress(opts.OperationsListenAddress); err != nil { + return nil, fmt.Errorf("invalid operations listen address: %w", err) + } + peerConfig.OperationsListenAddress = opts.OperationsListenAddress + } + if opts.ChaincodeAddress != "" && opts.ChaincodeAddress != 
peerConfig.ChaincodeAddress { + if err := s.validateAddress(opts.ChaincodeAddress); err != nil { + return nil, fmt.Errorf("invalid chaincode address: %w", err) + } + peerConfig.ChaincodeAddress = opts.ChaincodeAddress + } + if opts.DomainNames != nil { + peerConfig.DomainNames = opts.DomainNames + } + if opts.Env != nil { + peerConfig.Env = opts.Env + } + if opts.AddressOverrides != nil { + peerConfig.AddressOverrides = opts.AddressOverrides + deployPeerConfig.AddressOverrides = opts.AddressOverrides + } + if opts.Version != "" { + peerConfig.Version = opts.Version + deployPeerConfig.Version = opts.Version + } + + // Validate all addresses together for port conflicts + if err := s.validateFabricPeerAddresses(peerConfig); err != nil { + return nil, err + } + + configBytes, err := utils.StoreNodeConfig(nodeConfig) + if err != nil { + return nil, fmt.Errorf("failed to store node config: %w", err) + } + node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{ + ID: opts.NodeID, + NodeConfig: sql.NullString{ + String: string(configBytes), + Valid: true, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to update node config: %w", err) + } + + // Update the deployment config in the database + deploymentConfigBytes, err := json.Marshal(deployPeerConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal updated deployment config: %w", err) + } + + node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{ + ID: opts.NodeID, + DeploymentConfig: sql.NullString{ + String: string(deploymentConfigBytes), + Valid: true, + }, + }) + + // Synchronize the peer config + if err := s.SynchronizePeerConfig(ctx, opts.NodeID); err != nil { + return nil, fmt.Errorf("failed to synchronize peer config: %w", err) + } + + // Return updated node response + _, nodeResponse := s.mapDBNodeToServiceNode(node) + return nodeResponse, nil +} + +// UpdateFabricOrderer updates a Fabric orderer node configuration +func (s *NodeService) 
UpdateFabricOrderer(ctx context.Context, opts UpdateFabricOrdererOpts) (*NodeResponse, error) { + // Get the node from database + node, err := s.db.GetNode(ctx, opts.NodeID) + if err != nil { + if err == sql.ErrNoRows { + return nil, errors.NewNotFoundError("orderer node not found", nil) + } + return nil, fmt.Errorf("failed to get orderer node: %w", err) + } + + // Verify node type + if types.NodeType(node.NodeType.String) != types.NodeTypeFabricOrderer { + return nil, fmt.Errorf("node %d is not a Fabric orderer", opts.NodeID) + } + + // Load current config + nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String)) + if err != nil { + return nil, fmt.Errorf("failed to load orderer config: %w", err) + } + + ordererConfig, ok := nodeConfig.(*types.FabricOrdererConfig) + if !ok { + return nil, fmt.Errorf("invalid orderer config type") + } + + // Load deployment config + deployOrdererConfig := &types.FabricOrdererDeploymentConfig{} + if node.DeploymentConfig.Valid { + deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String) + if err != nil { + return nil, fmt.Errorf("failed to deserialize deployment config: %w", err) + } + var ok bool + deployOrdererConfig, ok = deploymentConfig.(*types.FabricOrdererDeploymentConfig) + if !ok { + return nil, fmt.Errorf("invalid orderer deployment config type") + } + } + + // Update configuration fields if provided + if opts.ExternalEndpoint != "" && opts.ExternalEndpoint != ordererConfig.ExternalEndpoint { + ordererConfig.ExternalEndpoint = opts.ExternalEndpoint + } + if opts.ListenAddress != "" && opts.ListenAddress != ordererConfig.ListenAddress { + if err := s.validateAddress(opts.ListenAddress); err != nil { + return nil, fmt.Errorf("invalid listen address: %w", err) + } + ordererConfig.ListenAddress = opts.ListenAddress + } + if opts.AdminAddress != "" && opts.AdminAddress != ordererConfig.AdminAddress { + if err := s.validateAddress(opts.AdminAddress); err != nil { + return nil, 
fmt.Errorf("invalid admin address: %w", err) + } + ordererConfig.AdminAddress = opts.AdminAddress + } + if opts.OperationsListenAddress != "" && opts.OperationsListenAddress != ordererConfig.OperationsListenAddress { + if err := s.validateAddress(opts.OperationsListenAddress); err != nil { + return nil, fmt.Errorf("invalid operations listen address: %w", err) + } + ordererConfig.OperationsListenAddress = opts.OperationsListenAddress + } + if opts.DomainNames != nil { + ordererConfig.DomainNames = opts.DomainNames + } + if opts.Env != nil { + ordererConfig.Env = opts.Env + } + if opts.Version != "" { + ordererConfig.Version = opts.Version + deployOrdererConfig.Version = opts.Version + } + + configBytes, err := utils.StoreNodeConfig(nodeConfig) + if err != nil { + return nil, fmt.Errorf("failed to store node config: %w", err) + } + node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{ + ID: opts.NodeID, + NodeConfig: sql.NullString{ + String: string(configBytes), + Valid: true, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to update node config: %w", err) + } + + // Update the deployment config in the database + deploymentConfigBytes, err := json.Marshal(deployOrdererConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal updated deployment config: %w", err) + } + + node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{ + ID: opts.NodeID, + DeploymentConfig: sql.NullString{ + String: string(deploymentConfigBytes), + Valid: true, + }, + }) + + // Return updated node response + _, nodeResponse := s.mapDBNodeToServiceNode(node) + return nodeResponse, nil +} + +// SynchronizePeerConfig synchronizes the peer's configuration files and service +func (s *NodeService) SynchronizePeerConfig(ctx context.Context, nodeID int64) error { + // Get the node from database + node, err := s.db.GetNode(ctx, nodeID) + if err != nil { + return fmt.Errorf("failed to get node: %w", err) + } + + // Verify node type + if 
types.NodeType(node.NodeType.String) != types.NodeTypeFabricPeer { + return fmt.Errorf("node %d is not a Fabric peer", nodeID) + } + + // Load node config + nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String)) + if err != nil { + return fmt.Errorf("failed to load node config: %w", err) + } + + peerConfig, ok := nodeConfig.(*types.FabricPeerConfig) + if !ok { + return fmt.Errorf("invalid peer config type") + } + + // Get organization + org, err := s.orgService.GetOrganization(ctx, peerConfig.OrganizationID) + if err != nil { + return fmt.Errorf("failed to get organization: %w", err) + } + + // Get local peer instance + localPeer := s.getPeerFromConfig(node, org, peerConfig) + + // Get deployment config + deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String) + if err != nil { + return fmt.Errorf("failed to deserialize deployment config: %w", err) + } + + peerDeployConfig, ok := deploymentConfig.(*types.FabricPeerDeploymentConfig) + if !ok { + return fmt.Errorf("invalid peer deployment config type") + } + + // Synchronize configuration + if err := localPeer.SynchronizeConfig(peerDeployConfig); err != nil { + return fmt.Errorf("failed to synchronize peer config: %w", err) + } + + return nil +} + +// GetExternalIP returns the external IP address of the node +func (s *NodeService) GetExternalIP() (string, error) { + // Try to get external IP from environment variable first + if externalIP := os.Getenv("EXTERNAL_IP"); externalIP != "" { + return externalIP, nil + } + + // Get local network interfaces + interfaces, err := net.Interfaces() + if err != nil { + return "", fmt.Errorf("failed to get network interfaces: %w", err) + } + + // Look for a suitable non-loopback interface with an IPv4 address + for _, iface := range interfaces { + // Skip loopback, down interfaces, and interfaces without addresses + if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 { + continue + } + + addrs, err := iface.Addrs() 
+ if err != nil { + continue + } + + for _, addr := range addrs { + // Check if this is an IP network address + ipNet, ok := addr.(*net.IPNet) + if !ok { + continue + } + + // Skip loopback and IPv6 addresses + ip := ipNet.IP.To4() + if ip == nil || ip.IsLoopback() { + continue + } + + // Skip link-local addresses + if ip[0] == 169 && ip[1] == 254 { + continue + } + + // Found a suitable IP address + return ip.String(), nil + } + } + + // Fallback to localhost if no suitable interface is found + return "127.0.0.1", nil +} + +// UpdateBesuNodeOpts contains the options for updating a Besu node +type UpdateBesuNodeRequest struct { + NetworkID uint `json:"networkId" validate:"required"` + P2PHost string `json:"p2pHost" validate:"required"` + P2PPort uint `json:"p2pPort" validate:"required"` + RPCHost string `json:"rpcHost" validate:"required"` + RPCPort uint `json:"rpcPort" validate:"required"` + Bootnodes []string `json:"bootnodes,omitempty"` + ExternalIP string `json:"externalIp,omitempty"` + InternalIP string `json:"internalIp,omitempty"` + Env map[string]string `json:"env,omitempty"` +} + +// UpdateBesuNode updates an existing Besu node configuration +func (s *NodeService) UpdateBesuNode(ctx context.Context, nodeID int64, req UpdateBesuNodeRequest) (*NodeResponse, error) { + // Get existing node + node, err := s.db.GetNode(ctx, nodeID) + if err != nil { + if err == sql.ErrNoRows { + return nil, errors.NewNotFoundError("node not found", nil) + } + return nil, fmt.Errorf("failed to get node: %w", err) + } + + // Verify node type + if types.NodeType(node.NodeType.String) != types.NodeTypeBesuFullnode { + return nil, errors.NewValidationError("node is not a Besu node", nil) + } + + // Load current config + nodeConfig, err := utils.LoadNodeConfig([]byte(node.NodeConfig.String)) + if err != nil { + return nil, fmt.Errorf("failed to load besu config: %w", err) + } + + besuConfig, ok := nodeConfig.(*types.BesuNodeConfig) + if !ok { + return nil, fmt.Errorf("invalid besu 
config type") + } + + // Load deployment config + deployBesuConfig := &types.BesuNodeDeploymentConfig{} + if node.DeploymentConfig.Valid { + deploymentConfig, err := utils.DeserializeDeploymentConfig(node.DeploymentConfig.String) + if err != nil { + return nil, fmt.Errorf("failed to deserialize deployment config: %w", err) + } + var ok bool + deployBesuConfig, ok = deploymentConfig.(*types.BesuNodeDeploymentConfig) + if !ok { + return nil, fmt.Errorf("invalid besu deployment config type") + } + } + + // Update configuration fields + besuConfig.NetworkID = int64(req.NetworkID) + besuConfig.P2PPort = req.P2PPort + besuConfig.RPCPort = req.RPCPort + besuConfig.P2PHost = req.P2PHost + besuConfig.RPCHost = req.RPCHost + deployBesuConfig.NetworkID = int64(req.NetworkID) + deployBesuConfig.P2PPort = req.P2PPort + deployBesuConfig.RPCPort = req.RPCPort + deployBesuConfig.P2PHost = req.P2PHost + deployBesuConfig.RPCHost = req.RPCHost + if req.Bootnodes != nil { + besuConfig.BootNodes = req.Bootnodes + } + + if req.ExternalIP != "" { + besuConfig.ExternalIP = req.ExternalIP + deployBesuConfig.ExternalIP = req.ExternalIP + } + if req.InternalIP != "" { + besuConfig.InternalIP = req.InternalIP + deployBesuConfig.InternalIP = req.InternalIP + } + + // Update environment variables + if req.Env != nil { + besuConfig.Env = req.Env + deployBesuConfig.Env = req.Env + } + + // Get the key to update the enodeURL + key, err := s.keymanagementService.GetKey(ctx, int(besuConfig.KeyID)) + if err != nil { + return nil, fmt.Errorf("failed to get key: %w", err) + } + + // Update enodeURL based on the public key, external IP and P2P port + if key.PublicKey != "" { + publicKey := key.PublicKey[2:] + deployBesuConfig.EnodeURL = fmt.Sprintf("enode://%s@%s:%d", publicKey, besuConfig.ExternalIP, besuConfig.P2PPort) + } + + // Store updated node config + configBytes, err := utils.StoreNodeConfig(besuConfig) + if err != nil { + return nil, fmt.Errorf("failed to store node config: %w", err) + } + + 
node, err = s.db.UpdateNodeConfig(ctx, &db.UpdateNodeConfigParams{ + ID: nodeID, + NodeConfig: sql.NullString{ + String: string(configBytes), + Valid: true, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to update node config: %w", err) + } + + // Update deployment config + deploymentConfigBytes, err := json.Marshal(deployBesuConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal deployment config: %w", err) + } + + node, err = s.db.UpdateDeploymentConfig(ctx, &db.UpdateDeploymentConfigParams{ + ID: nodeID, + DeploymentConfig: sql.NullString{ + String: string(deploymentConfigBytes), + Valid: true, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to update deployment config: %w", err) + } + + // Return updated node + _, nodeResponse := s.mapDBNodeToServiceNode(node) + return nodeResponse, nil +} + +// validateBesuConfig validates the Besu node configuration +func (s *NodeService) validateBesuConfig(config *types.BesuNodeConfig) error { + + if config.P2PPort == 0 { + return fmt.Errorf("p2p port is required") + } + if config.RPCPort == 0 { + return fmt.Errorf("rpc port is required") + } + if config.NetworkID == 0 { + return fmt.Errorf("network ID is required") + } + if config.P2PHost == "" { + return fmt.Errorf("p2p host is required") + } + if config.RPCHost == "" { + return fmt.Errorf("rpc host is required") + } + if config.ExternalIP == "" { + return fmt.Errorf("external IP is required") + } + if config.InternalIP == "" { + return fmt.Errorf("internal IP is required") + } + + return nil +} diff --git a/pkg/nodes/service/types.go b/pkg/nodes/service/types.go new file mode 100644 index 0000000..3bf8de7 --- /dev/null +++ b/pkg/nodes/service/types.go @@ -0,0 +1,31 @@ +package service + +import ( + "github.com/chainlaunch/chainlaunch/pkg/nodes/types" +) + +// UpdateFabricPeerOpts represents the options for updating a Fabric peer node +type UpdateFabricPeerOpts struct { + NodeID int64 + ExternalEndpoint string + ListenAddress 
string + EventsAddress string + OperationsListenAddress string + ChaincodeAddress string + DomainNames []string + Env map[string]string + AddressOverrides []types.AddressOverride + Version string +} + +// UpdateFabricOrdererOpts represents the options for updating a Fabric orderer node +type UpdateFabricOrdererOpts struct { + NodeID int64 + ExternalEndpoint string + ListenAddress string + AdminAddress string + OperationsListenAddress string + DomainNames []string + Env map[string]string + Version string +} diff --git a/pkg/nodes/types/channel.go b/pkg/nodes/types/channel.go new file mode 100644 index 0000000..5cd2cbb --- /dev/null +++ b/pkg/nodes/types/channel.go @@ -0,0 +1,10 @@ +package types + +import "time" + +// Channel represents a Fabric channel +type Channel struct { + Name string `json:"name"` + BlockNum int64 `json:"blockNum"` + CreatedAt time.Time `json:"createdAt"` +} diff --git a/pkg/nodes/types/deployment.go b/pkg/nodes/types/deployment.go index 33a3d07..cab3408 100644 --- a/pkg/nodes/types/deployment.go +++ b/pkg/nodes/types/deployment.go @@ -79,6 +79,11 @@ type FabricPeerDeploymentConfig struct { ExternalEndpoint string `json:"externalEndpoint" example:"peer0.org1.example.com:7051"` // @Description Domain names for the peer DomainNames []string `json:"domainNames,omitempty"` + + // @Description Address overrides for the peer + AddressOverrides []AddressOverride `json:"addressOverrides,omitempty"` + // @Description Fabric version to use + Version string `json:"version" example:"2.5.0"` } func (c *FabricPeerDeploymentConfig) GetMode() string { return c.Mode } @@ -108,6 +113,7 @@ func (c *FabricPeerDeploymentConfig) ToFabricPeerConfig() *FabricPeerDeploymentC TLSCert: c.TLSCert, CACert: c.CACert, TLSCACert: c.TLSCACert, + Version: c.Version, } } func (c *FabricPeerDeploymentConfig) ToFabricOrdererConfig() *FabricOrdererDeploymentConfig { @@ -151,12 +157,18 @@ type FabricOrdererDeploymentConfig struct { ExternalEndpoint string `json:"externalEndpoint" 
example:"orderer.example.com:7050"` // @Description Domain names for the orderer DomainNames []string `json:"domainNames,omitempty"` + // @Description Fabric version to use + Version string `json:"version" example:"2.5.0"` } func (c *FabricOrdererDeploymentConfig) GetURL() string { return fmt.Sprintf("grpcs://%s", c.ExternalEndpoint) } +func (c *FabricOrdererDeploymentConfig) GetAddress() string { + return c.ExternalEndpoint +} + func (c *FabricOrdererDeploymentConfig) GetMode() string { return c.Mode } func (c *FabricOrdererDeploymentConfig) Validate() error { if c.Mode != "service" && c.Mode != "docker" { @@ -187,6 +199,7 @@ func (c *FabricOrdererDeploymentConfig) ToFabricOrdererConfig() *FabricOrdererDe TLSCACert: c.TLSCACert, SignKeyID: c.SignKeyID, TLSKeyID: c.TLSKeyID, + Version: c.Version, } } @@ -285,6 +298,10 @@ type FabricPeerConfig struct { Env map[string]string `json:"env,omitempty"` // @Description Fabric version to use Version string `json:"version" example:"2.2.0"` + // @Description Orderer address overrides for the peer + OrdererAddressOverrides []OrdererAddressOverride `json:"ordererAddressOverrides,omitempty"` + // @Description Address overrides for the peer + AddressOverrides []AddressOverride `json:"addressOverrides,omitempty"` } // FabricOrdererConfig represents the parameters needed to create a Fabric orderer node @@ -300,6 +317,8 @@ type FabricOrdererConfig struct { DomainNames []string `json:"domainNames,omitempty"` Env map[string]string `json:"env,omitempty"` Version string `json:"version"` // Fabric version to use + // @Description Address overrides for the orderer + AddressOverrides []AddressOverride `json:"addressOverrides,omitempty"` } // BesuNodeConfig represents the parameters needed to create a Besu node @@ -442,3 +461,104 @@ func MapToNodeConfig(deploymentConfig NodeDeploymentConfig) (NodeConfig, error) return nil, fmt.Errorf("unsupported node type: %s", deploymentConfig.GetType()) } } + +// UpdateNodeEnvRequest represents a request 
to update a node's environment variables +type UpdateNodeEnvRequest struct { + // @Description Environment variables to update + Env map[string]string `json:"env" validate:"required"` +} + +// UpdateNodeEnvResponse represents the response after updating a node's environment variables +type UpdateNodeEnvResponse struct { + // @Description Updated environment variables + Env map[string]string `json:"env"` + // @Description Whether the node needs to be restarted for changes to take effect + RequiresRestart bool `json:"requiresRestart"` +} + +// UpdateNodeConfigRequest represents a request to update a node's configuration +type UpdateNodeConfigRequest struct { + // Common fields + // @Description Environment variables to update + Env map[string]string `json:"env,omitempty"` + // @Description Domain names for the node + DomainNames []string `json:"domainNames,omitempty"` + // @Description The deployment mode (service or docker) + Mode string `json:"mode,omitempty" validate:"omitempty,oneof=service docker"` + + // Fabric peer specific fields + // @Description Listen address for the peer + ListenAddress string `json:"listenAddress,omitempty"` + // @Description Chaincode listen address + ChaincodeAddress string `json:"chaincodeAddress,omitempty"` + // @Description Events listen address + EventsAddress string `json:"eventsAddress,omitempty"` + // @Description Operations listen address + OperationsListenAddress string `json:"operationsListenAddress,omitempty"` + // @Description External endpoint for the peer + ExternalEndpoint string `json:"externalEndpoint,omitempty"` + + // Fabric orderer specific fields + // @Description Admin listen address for orderer + AdminAddress string `json:"adminAddress,omitempty"` + + // Besu specific fields + // @Description P2P port for Besu node + P2PPort uint `json:"p2pPort,omitempty"` + // @Description RPC port for Besu node + RPCPort uint `json:"rpcPort,omitempty"` + // @Description P2P host address + P2PHost string 
`json:"p2pHost,omitempty"` + // @Description RPC host address + RPCHost string `json:"rpcHost,omitempty"` + // @Description External IP address + ExternalIP string `json:"externalIp,omitempty"` + // @Description Internal IP address + InternalIP string `json:"internalIp,omitempty"` +} + +// UpdateNodeConfigResponse represents the response after updating a node's configuration +type UpdateNodeConfigResponse struct { + // @Description Updated node configuration + Config NodeConfig `json:"config"` + // @Description Whether the node needs to be restarted for changes to take effect + RequiresRestart bool `json:"requiresRestart"` +} + +// OrdererAddressOverride represents an orderer address override configuration +type OrdererAddressOverride struct { + // @Description Original orderer address + From string `json:"from" validate:"required"` + // @Description New orderer address to use + To string `json:"to" validate:"required"` + // @Description TLS CA certificate in PEM format + TLSCACert string `json:"tlsCACert" validate:"required"` +} + +// UpdatePeerOrdererOverridesRequest represents a request to update a peer's orderer address overrides +type UpdatePeerOrdererOverridesRequest struct { + // @Description List of orderer address overrides + Overrides []OrdererAddressOverride `json:"overrides" validate:"required,dive"` +} + +// UpdatePeerOrdererOverridesResponse represents the response after updating orderer address overrides +type UpdatePeerOrdererOverridesResponse struct { + // @Description Updated orderer address overrides + Overrides []OrdererAddressOverride `json:"overrides"` + // @Description Whether the node needs to be restarted for changes to take effect + RequiresRestart bool `json:"requiresRestart"` +} + +// UpdateNodeAddressOverridesRequest represents a request to update a node's address overrides +type UpdateNodeAddressOverridesRequest struct { + // @Description List of address overrides + Overrides []AddressOverride `json:"overrides" 
validate:"required,dive"` +} + +// UpdateNodeAddressOverridesResponse represents the response after updating address overrides +type UpdateNodeAddressOverridesResponse struct { + // @Description Updated address overrides + Overrides []AddressOverride `json:"overrides"` + // @Description Whether the node needs to be restarted for changes to take effect + RequiresRestart bool `json:"requiresRestart"` +} diff --git a/pkg/nodes/types/types.go b/pkg/nodes/types/types.go index 2e3b35b..199db47 100644 --- a/pkg/nodes/types/types.go +++ b/pkg/nodes/types/types.go @@ -38,6 +38,7 @@ const ( NodeStatusStopped NodeStatus = "STOPPED" NodeStatusStopping NodeStatus = "STOPPING" NodeStatusStarting NodeStatus = "STARTING" + NodeStatusUpdating NodeStatus = "UPDATING" NodeStatusError NodeStatus = "ERROR" ) @@ -46,3 +47,9 @@ type StoredConfig struct { Type string `json:"type"` Config json.RawMessage `json:"config"` } + +type AddressOverride struct { + From string `json:"from"` + To string `json:"to"` + TLSCACert string `json:"tlsCACert"` +} diff --git a/pkg/notifications/http/handler.go b/pkg/notifications/http/handler.go index 6130b27..c0c4981 100644 --- a/pkg/notifications/http/handler.go +++ b/pkg/notifications/http/handler.go @@ -38,7 +38,7 @@ func (h *NotificationHandler) RegisterRoutes(r chi.Router) { // @Summary Create a notification provider // @Description Create a new notification provider with the specified configuration -// @Tags notifications +// @Tags Notifications // @Accept json // @Produce json // @Param request body CreateProviderRequest true "Provider creation request" @@ -80,7 +80,7 @@ func (h *NotificationHandler) CreateProvider(w http.ResponseWriter, r *http.Requ // @Summary List notification providers // @Description Get a list of all notification providers -// @Tags notifications +// @Tags Notifications // @Accept json // @Produce json // @Success 200 {array} ProviderResponse @@ -99,7 +99,7 @@ func (h *NotificationHandler) ListProviders(w http.ResponseWriter, r 
*http.Reque // @Summary Get a notification provider // @Description Get detailed information about a specific notification provider -// @Tags notifications +// @Tags Notifications // @Accept json // @Produce json // @Param id path int true "Provider ID" @@ -127,7 +127,7 @@ func (h *NotificationHandler) GetProvider(w http.ResponseWriter, r *http.Request // @Summary Update a notification provider // @Description Update an existing notification provider with new configuration -// @Tags notifications +// @Tags Notifications // @Accept json // @Produce json // @Param id path int true "Provider ID" @@ -177,7 +177,7 @@ func (h *NotificationHandler) UpdateProvider(w http.ResponseWriter, r *http.Requ // @Summary Delete a notification provider // @Description Delete a notification provider -// @Tags notifications +// @Tags Notifications // @Accept json // @Produce json // @Param id path int true "Provider ID" @@ -203,7 +203,7 @@ func (h *NotificationHandler) DeleteProvider(w http.ResponseWriter, r *http.Requ // @Summary Test a notification provider // @Description Test a notification provider -// @Tags notifications +// @Tags Notifications // @Accept json // @Produce json // @Param id path int true "Provider ID" diff --git a/pkg/notifications/service/service.go b/pkg/notifications/service/service.go index 966f857..b19ffae 100644 --- a/pkg/notifications/service/service.go +++ b/pkg/notifications/service/service.go @@ -48,7 +48,7 @@ func (s *NotificationService) CreateProvider(ctx context.Context, params notific } } - provider, err := s.queries.CreateNotificationProvider(ctx, db.CreateNotificationProviderParams{ + provider, err := s.queries.CreateNotificationProvider(ctx, &db.CreateNotificationProviderParams{ Type: string(params.Type), Name: params.Name, Config: string(configJSON), @@ -84,7 +84,7 @@ func (s *NotificationService) UpdateProvider(ctx context.Context, params notific } } - provider, err := s.queries.UpdateNotificationProvider(ctx, 
db.UpdateNotificationProviderParams{ + provider, err := s.queries.UpdateNotificationProvider(ctx, &db.UpdateNotificationProviderParams{ ID: params.ID, Type: string(params.Type), Name: params.Name, @@ -181,7 +181,7 @@ func (s *NotificationService) TestProvider(ctx context.Context, id int64, params } // Update provider with test results - _, err = s.queries.UpdateProviderTestResults(ctx, db.UpdateProviderTestResultsParams{ + _, err = s.queries.UpdateProviderTestResults(ctx, &db.UpdateProviderTestResultsParams{ ID: id, LastTestAt: sql.NullTime{Time: time.Now(), Valid: true}, LastTestStatus: sql.NullString{String: testStatus, Valid: true}, @@ -271,7 +271,7 @@ func (s *NotificationService) sendEmail(config notifications.SMTPConfig, from st } } -func (s *NotificationService) providerToDTO(provider db.NotificationProvider, config interface{}) *notifications.NotificationProvider { +func (s *NotificationService) providerToDTO(provider *db.NotificationProvider, config interface{}) *notifications.NotificationProvider { return ¬ifications.NotificationProvider{ ID: provider.ID, Type: notifications.ProviderType(provider.Type), @@ -300,7 +300,8 @@ func (s *NotificationService) SendBackupSuccessNotification(ctx context.Context, // Get default notification provider for backup successes provider, err := s.queries.GetDefaultNotificationProviderForType(ctx, "BACKUP_SUCCESS") if err != nil { - return fmt.Errorf("failed to get default notification provider: %w", err) + s.logger.Warn("Failed to get default notification provider for backup successes", "error", err) + return nil } if !provider.NotifyBackupSuccess { @@ -330,7 +331,8 @@ func (s *NotificationService) SendBackupFailureNotification(ctx context.Context, // Get default notification provider for backup failures provider, err := s.queries.GetDefaultNotificationProviderForType(ctx, "BACKUP_FAILURE") if err != nil { - return fmt.Errorf("failed to get default notification provider: %w", err) + s.logger.Warn("Failed to get default 
notification provider for backup failures", "error", err) + return nil } if !provider.NotifyBackupFailure { @@ -360,7 +362,8 @@ func (s *NotificationService) SendS3ConnectionIssueNotification(ctx context.Cont // Get default notification provider for S3 connection issues provider, err := s.queries.GetDefaultNotificationProviderForType(ctx, "S3_CONNECTION_ISSUE") if err != nil { - return fmt.Errorf("failed to get default notification provider: %w", err) + s.logger.Warn("Failed to get default notification provider for S3 connection issues", "error", err) + return nil } if !provider.NotifyS3ConnectionIssue { @@ -390,7 +393,8 @@ func (s *NotificationService) SendNodeDowntimeNotification(ctx context.Context, // Get default notification provider for node downtime provider, err := s.queries.GetDefaultNotificationProviderForType(ctx, "NODE_DOWNTIME") if err != nil { - return fmt.Errorf("failed to get default notification provider: %w", err) + s.logger.Warn("Failed to get default notification provider for node downtime", "error", err) + return nil } if !provider.NotifyNodeDowntime { @@ -420,7 +424,8 @@ func (s *NotificationService) SendNodeRecoveryNotification(ctx context.Context, // Get default notification provider for node downtime (same provider handles recovery) provider, err := s.queries.GetDefaultNotificationProviderForType(ctx, "NODE_DOWNTIME") if err != nil { - return fmt.Errorf("failed to get default notification provider: %w", err) + s.logger.Warn("Failed to get default notification provider for node recovery", "error", err) + return nil } if !provider.NotifyNodeDowntime { diff --git a/pkg/settings/http/handlers.go b/pkg/settings/http/handlers.go new file mode 100644 index 0000000..dedc2c1 --- /dev/null +++ b/pkg/settings/http/handlers.go @@ -0,0 +1,81 @@ +package http + +import ( + "encoding/json" + "net/http" + + "github.com/chainlaunch/chainlaunch/pkg/logger" + "github.com/chainlaunch/chainlaunch/pkg/settings/service" + "github.com/go-chi/chi/v5" +) + +// 
Handler handles HTTP requests for settings +type Handler struct { + service *service.SettingsService + logger *logger.Logger +} + +// NewHandler creates a new settings handler +func NewHandler(service *service.SettingsService, logger *logger.Logger) *Handler { + return &Handler{ + service: service, + logger: logger, + } +} + +// RegisterRoutes registers the settings routes +func (h *Handler) RegisterRoutes(r chi.Router) { + r.Route("/settings", func(r chi.Router) { + r.Post("/", h.CreateOrUpdateSetting) // Create or update the default setting + r.Get("/", h.GetSetting) // Get the default setting + r.Put("/", h.CreateOrUpdateSetting) // Update the default setting (same as POST) + }) +} + +// CreateOrUpdateSetting handles setting creation or update +// @Summary Create or update the default setting +// @Description Create or update the default setting with the provided configuration +// @Tags Settings +// @Accept json +// @Produce json +// @Param setting body service.CreateSettingParams true "Setting configuration" +// @Success 200 {object} service.Setting +// @Router /settings [post] +// @BasePath /api/v1 +func (h *Handler) CreateOrUpdateSetting(w http.ResponseWriter, r *http.Request) { + var params service.CreateSettingParams + if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + setting, err := h.service.CreateSetting(r.Context(), params) + if err != nil { + h.logger.Error("Failed to create/update setting", "error", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(setting) +} + +// GetSetting handles default setting retrieval +// @Summary Get the default setting +// @Description Get the default setting's details +// @Tags Settings +// @Produce json +// @Success 200 {object} service.Setting +// @Router /settings [get] +// @BasePath /api/v1 +func (h *Handler) 
GetSetting(w http.ResponseWriter, r *http.Request) { + setting, err := h.service.GetSetting(r.Context()) + if err != nil { + h.logger.Error("Failed to get setting", "error", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(setting) +} diff --git a/pkg/settings/service/service.go b/pkg/settings/service/service.go new file mode 100644 index 0000000..f7d3661 --- /dev/null +++ b/pkg/settings/service/service.go @@ -0,0 +1,241 @@ +package service + +import ( + "context" + "encoding/json" + "fmt" + "text/template" + "time" + + "github.com/chainlaunch/chainlaunch/pkg/db" + "github.com/chainlaunch/chainlaunch/pkg/logger" +) + +// Default settings configuration +var defaultConfig = SettingConfig{ + PeerTemplateCMD: "{{.Cmd}}", + OrdererTemplateCMD: "{{.Cmd}}", + BesuTemplateCMD: "{{.Cmd}}", +} + +// Setting represents a setting in the service layer +type Setting struct { + ID int64 `json:"id"` + Config SettingConfig `json:"config"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type SettingConfig struct { + PeerTemplateCMD string `json:"peerTemplateCMD"` + OrdererTemplateCMD string `json:"ordererTemplateCMD"` + BesuTemplateCMD string `json:"besuTemplateCMD"` +} + +// CreateSettingParams represents the parameters for creating a setting +type CreateSettingParams struct { + Config SettingConfig `json:"config"` +} + +// UpdateSettingParams represents the parameters for updating a setting +type UpdateSettingParams struct { + Config SettingConfig `json:"config"` +} + +// SettingsService handles operations for settings +type SettingsService struct { + queries *db.Queries + logger *logger.Logger +} + +// NewSettingsService creates a new settings service +func NewSettingsService(queries *db.Queries, logger *logger.Logger) *SettingsService { + return &SettingsService{ + queries: queries, + logger: logger, + } +} + +// 
validateTemplates checks if all templates in the config are valid Go templates +func validateTemplates(config SettingConfig) error { + templates := map[string]string{ + "PeerTemplate": config.PeerTemplateCMD, + "OrdererTemplate": config.OrdererTemplateCMD, + "BesuTemplate": config.BesuTemplateCMD, + } + + for name, tmpl := range templates { + _, err := template.New(name).Parse(tmpl) + if err != nil { + return fmt.Errorf("invalid %s: %w", name, err) + } + } + return nil +} + +// CreateSetting creates or updates the setting +func (s *SettingsService) CreateSetting(ctx context.Context, params CreateSettingParams) (*Setting, error) { + // Validate templates before proceeding + if err := validateTemplates(params.Config); err != nil { + return nil, fmt.Errorf("template validation failed: %w", err) + } + + configJSON, err := json.Marshal(params.Config) + if err != nil { + return nil, fmt.Errorf("failed to marshal config: %w", err) + } + + // Get existing setting if any + settings, err := s.queries.ListSettings(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list settings: %w", err) + } + + var dbSetting *db.Setting + if len(settings) > 0 { + // Update existing setting + dbSetting, err = s.queries.UpdateSetting(ctx, &db.UpdateSettingParams{ + ID: settings[0].ID, + Config: string(configJSON), + }) + } else { + // Create new setting + dbSetting, err = s.queries.CreateSetting(ctx, string(configJSON)) + } + if err != nil { + return nil, fmt.Errorf("failed to save setting: %w", err) + } + + var config SettingConfig + if err := json.Unmarshal([]byte(dbSetting.Config), &config); err != nil { + return nil, fmt.Errorf("failed to unmarshal config: %w", err) + } + + return &Setting{ + ID: dbSetting.ID, + Config: config, + CreatedAt: dbSetting.CreatedAt.Time, + UpdatedAt: dbSetting.UpdatedAt.Time, + }, nil +} + +// GetSetting retrieves the setting or initializes with defaults if none exist +func (s *SettingsService) GetSetting(ctx context.Context) (*Setting, error) { + 
settings, err := s.queries.ListSettings(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list settings: %w", err) + } + + if len(settings) == 0 { + return nil, fmt.Errorf("no settings found") + } + + dbSetting := settings[0] + + var config SettingConfig + if err := json.Unmarshal([]byte(dbSetting.Config), &config); err != nil { + return nil, fmt.Errorf("failed to unmarshal config: %w", err) + } + + return &Setting{ + ID: dbSetting.ID, + Config: config, + CreatedAt: dbSetting.CreatedAt.Time, + UpdatedAt: dbSetting.UpdatedAt.Time, + }, nil +} + +// initializeDefaultSettings creates the default settings in the database +func (s *SettingsService) InitializeDefaultSettings(ctx context.Context) (*Setting, error) { + configJSON, err := json.Marshal(defaultConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal default config: %w", err) + } + + dbSetting, err := s.queries.CreateSetting(ctx, string(configJSON)) + if err != nil { + return nil, fmt.Errorf("failed to create default settings: %w", err) + } + + return &Setting{ + ID: dbSetting.ID, + Config: defaultConfig, + CreatedAt: dbSetting.CreatedAt.Time, + UpdatedAt: dbSetting.UpdatedAt.Time, + }, nil +} + +// ListSettings returns all settings (only one row exists) +func (s *SettingsService) ListSettings(ctx context.Context) ([]*Setting, error) { + settings, err := s.queries.ListSettings(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list settings: %w", err) + } + + if len(settings) == 0 { + return nil, fmt.Errorf("no settings found") + } + + dbSetting := settings[0] + + var config SettingConfig + if err := json.Unmarshal([]byte(dbSetting.Config), &config); err != nil { + return nil, fmt.Errorf("failed to unmarshal config: %w", err) + } + + return []*Setting{ + { + ID: dbSetting.ID, + Config: config, + CreatedAt: dbSetting.CreatedAt.Time, + UpdatedAt: dbSetting.UpdatedAt.Time, + }, + }, nil +} + +// UpdateSetting updates the setting +func (s *SettingsService) UpdateSetting(ctx 
context.Context, id int64, params UpdateSettingParams) (*Setting, error) { + // Validate templates before proceeding + if err := validateTemplates(params.Config); err != nil { + return nil, fmt.Errorf("template validation failed: %w", err) + } + + configJSON, err := json.Marshal(params.Config) + if err != nil { + return nil, fmt.Errorf("failed to marshal config: %w", err) + } + + settings, err := s.queries.ListSettings(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list settings: %w", err) + } + + if len(settings) == 0 { + return nil, fmt.Errorf("no settings found") + } + + dbSetting, err := s.queries.UpdateSetting(ctx, &db.UpdateSettingParams{ + ID: settings[0].ID, + Config: string(configJSON), + }) + if err != nil { + return nil, fmt.Errorf("failed to update setting: %w", err) + } + + var config SettingConfig + if err := json.Unmarshal([]byte(dbSetting.Config), &config); err != nil { + return nil, fmt.Errorf("failed to unmarshal config: %w", err) + } + + return &Setting{ + ID: dbSetting.ID, + Config: config, + CreatedAt: dbSetting.CreatedAt.Time, + UpdatedAt: dbSetting.UpdatedAt.Time, + }, nil +} + +// DeleteSetting is deprecated as we maintain one persistent setting +func (s *SettingsService) DeleteSetting(ctx context.Context, id int64) error { + return fmt.Errorf("delete operation is not supported for settings") +} diff --git a/pkg/settings/service/types.go b/pkg/settings/service/types.go new file mode 100644 index 0000000..ec4c489 --- /dev/null +++ b/pkg/settings/service/types.go @@ -0,0 +1,29 @@ +package service + +import "time" + +// Template represents a node configuration template +type Template struct { + ID int64 `json:"id"` + Type string `json:"type"` + Name string `json:"name"` + Description string `json:"description"` + Template map[string]interface{} `json:"template"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// CreateTemplateParams represents parameters for creating a template +type 
CreateTemplateParams struct { + Type string `json:"type" validate:"required"` + Name string `json:"name" validate:"required"` + Description string `json:"description"` + Template map[string]interface{} `json:"template" validate:"required"` +} + +// UpdateTemplateParams represents parameters for updating a template +type UpdateTemplateParams struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Template map[string]interface{} `json:"template,omitempty"` +} diff --git a/sqlc.yaml b/sqlc.yaml index a9dbd90..7a96f18 100644 --- a/sqlc.yaml +++ b/sqlc.yaml @@ -8,7 +8,12 @@ sql: package: "db" out: "pkg/db" emit_json_tags: true - emit_prepared_queries: true emit_interface: true + emit_empty_slices: true emit_exact_table_names: false - emit_empty_slices: true \ No newline at end of file + emit_exported_queries: true + emit_result_struct_pointers: true + emit_params_struct_pointers: true + emit_enum_valid_method: true + emit_all_enum_values: true + json_tags_case_style: "camel" \ No newline at end of file diff --git a/web/bun.lockb b/web/bun.lockb index 5f3218f..3dce90b 100755 Binary files a/web/bun.lockb and b/web/bun.lockb differ diff --git a/web/package.json b/web/package.json index 96f4b78..19db435 100644 --- a/web/package.json +++ b/web/package.json @@ -33,12 +33,14 @@ "@radix-ui/react-tooltip": "^1.1.6", "@tanstack/react-query": "^5.62.11", "@tanstack/react-table": "^8.21.2", + "ansi-to-html": "^0.7.2", "autoprefixer": "^10.4.20", "buffer": "^6.0.3", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "cmdk": "1.0.0", "date-fns": "^4.1.0", + "dompurify": "^3.2.5", "lucide-react": "^0.469.0", "next-themes": "^0.4.4", "postcss": "^8.4.49", @@ -49,6 +51,7 @@ "react-router-dom": "^7.1.1", "react-syntax-highlighter": "^15.6.1", "recharts": "^2.15.0", + "redoc": "^2.5.0", "rehype-highlight": "^7.0.2", "rehype-raw": "^7.0.0", "sonner": "^1.7.1", diff --git a/web/src/App.tsx b/web/src/App.tsx index fddfe53..f1ff98a 100644 --- 
a/web/src/App.tsx +++ b/web/src/App.tsx @@ -12,8 +12,11 @@ import { AuthProvider } from './contexts/AuthContext' import { BreadcrumbProvider } from './contexts/BreadcrumbContext' import './globals.css' +import SharedNetworksPage from '@/pages/networks/fabric/shared' +import ImportNetworkPage from '@/pages/networks/import' import CreateBesuNodePage from '@/pages/nodes/besu/create' import CreateFabricNodePage from '@/pages/nodes/fabric/create' +import EditFabricNodePage from '@/pages/nodes/fabric/edit' import NodesLogsPage from '@/pages/nodes/logs' import { Toaster } from './components/ui/sonner' import CertificateTemplatesPage from './pages/identity/certificates' @@ -40,8 +43,11 @@ import KeyManagementPage from './pages/settings/keys' import KeyDetailPage from './pages/settings/keys/[id]' import NetworkConfigPage from './pages/settings/network' import SmartContractsPage from './pages/smart-contracts' -import SharedNetworksPage from '@/pages/networks/fabric/shared' -import ImportNetworkPage from '@/pages/networks/import' +import { BlocksOverview } from '@/components/networks/blocks-overview' +import { BlockDetails } from '@/components/networks/block-details' +import ApiDocumentationPage from './pages/api-documentation' +import BulkCreateBesuNetworkPage from './pages/networks/besu/bulk-create' +import EditBesuNodePage from './pages/nodes/besu/edit' const queryClient = new QueryClient({ defaultOptions: { @@ -67,7 +73,7 @@ const App = () => {
-
+
} /> @@ -89,17 +95,23 @@ const App = () => { } /> } /> } /> + } /> + } /> } /> } /> } /> } /> } /> + } /> + } /> } /> } /> } /> } /> } /> } /> + } /> + } /> } /> diff --git a/web/src/api/client/@tanstack/react-query.gen.ts b/web/src/api/client/@tanstack/react-query.gen.ts index b571eab..b0f27d2 100644 --- a/web/src/api/client/@tanstack/react-query.gen.ts +++ b/web/src/api/client/@tanstack/react-query.gen.ts @@ -2,8 +2,8 @@ import type { Options } from '@hey-api/client-fetch'; import { queryOptions, type UseMutationOptions, type DefaultError, infiniteQueryOptions, type InfiniteData } from '@tanstack/react-query'; -import type { PostAuthLoginData, PostAuthLoginError, PostAuthLoginResponse, PostAuthLogoutData, PostAuthLogoutError, PostAuthLogoutResponse, GetAuthMeData, GetBackupsData, PostBackupsData, PostBackupsError, PostBackupsResponse, GetBackupsSchedulesData, PostBackupsSchedulesData, PostBackupsSchedulesError, PostBackupsSchedulesResponse, DeleteBackupsSchedulesByIdData, DeleteBackupsSchedulesByIdError, GetBackupsSchedulesByIdData, PutBackupsSchedulesByIdData, PutBackupsSchedulesByIdError, PutBackupsSchedulesByIdResponse, PutBackupsSchedulesByIdDisableData, PutBackupsSchedulesByIdDisableError, PutBackupsSchedulesByIdDisableResponse, PutBackupsSchedulesByIdEnableData, PutBackupsSchedulesByIdEnableError, PutBackupsSchedulesByIdEnableResponse, GetBackupsTargetsData, PostBackupsTargetsData, PostBackupsTargetsError, PostBackupsTargetsResponse, DeleteBackupsTargetsByIdData, DeleteBackupsTargetsByIdError, GetBackupsTargetsByIdData, PutBackupsTargetsByIdData, PutBackupsTargetsByIdError, PutBackupsTargetsByIdResponse, DeleteBackupsByIdData, DeleteBackupsByIdError, GetBackupsByIdData, PostDummyData, PostDummyResponse, GetKeyProvidersData, PostKeyProvidersData, PostKeyProvidersError, PostKeyProvidersResponse, DeleteKeyProvidersByIdData, DeleteKeyProvidersByIdError, GetKeyProvidersByIdData, GetKeysData, GetKeysError, GetKeysResponse, PostKeysData, PostKeysError, 
PostKeysResponse, GetKeysAllData, GetKeysFilterData, GetKeysFilterError, GetKeysFilterResponse, DeleteKeysByIdData, DeleteKeysByIdError, GetKeysByIdData, PostKeysByKeyIdSignData, PostKeysByKeyIdSignError, PostKeysByKeyIdSignResponse, GetNetworksBesuData, GetNetworksBesuError, GetNetworksBesuResponse, PostNetworksBesuData, PostNetworksBesuError, PostNetworksBesuResponse, PostNetworksBesuImportData, PostNetworksBesuImportError, PostNetworksBesuImportResponse, DeleteNetworksBesuByIdData, DeleteNetworksBesuByIdError, GetNetworksBesuByIdData, GetNetworksFabricData, GetNetworksFabricError, GetNetworksFabricResponse, PostNetworksFabricData, PostNetworksFabricError, PostNetworksFabricResponse, GetNetworksFabricByNameByNameData, PostNetworksFabricImportData, PostNetworksFabricImportError, PostNetworksFabricImportResponse, PostNetworksFabricImportWithOrgData, PostNetworksFabricImportWithOrgError, PostNetworksFabricImportWithOrgResponse, DeleteNetworksFabricByIdData, DeleteNetworksFabricByIdError, GetNetworksFabricByIdData, PostNetworksFabricByIdAnchorPeersData, PostNetworksFabricByIdAnchorPeersError, PostNetworksFabricByIdAnchorPeersResponse, GetNetworksFabricByIdChannelConfigData, GetNetworksFabricByIdCurrentChannelConfigData, GetNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesError, PostNetworksFabricByIdNodesResponse, DeleteNetworksFabricByIdOrderersByOrdererIdData, DeleteNetworksFabricByIdOrderersByOrdererIdError, DeleteNetworksFabricByIdOrderersByOrdererIdResponse, PostNetworksFabricByIdOrderersByOrdererIdJoinData, PostNetworksFabricByIdOrderersByOrdererIdJoinError, PostNetworksFabricByIdOrderersByOrdererIdJoinResponse, PostNetworksFabricByIdOrderersByOrdererIdUnjoinData, PostNetworksFabricByIdOrderersByOrdererIdUnjoinError, PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse, GetNetworksFabricByIdOrganizationsByOrgIdConfigData, DeleteNetworksFabricByIdPeersByPeerIdData, DeleteNetworksFabricByIdPeersByPeerIdError, 
DeleteNetworksFabricByIdPeersByPeerIdResponse, PostNetworksFabricByIdPeersByPeerIdJoinData, PostNetworksFabricByIdPeersByPeerIdJoinError, PostNetworksFabricByIdPeersByPeerIdJoinResponse, PostNetworksFabricByIdPeersByPeerIdUnjoinData, PostNetworksFabricByIdPeersByPeerIdUnjoinError, PostNetworksFabricByIdPeersByPeerIdUnjoinResponse, PostNetworksFabricByIdReloadBlockData, PostNetworksFabricByIdReloadBlockError, PostNetworksFabricByIdReloadBlockResponse, GetNodesData, GetNodesError, GetNodesResponse, PostNodesData, PostNodesError, PostNodesResponse, GetNodesDefaultsBesuNodeData, GetNodesDefaultsFabricData, GetNodesDefaultsFabricOrdererData, GetNodesDefaultsFabricPeerData, GetNodesPlatformByPlatformData, GetNodesPlatformByPlatformError, GetNodesPlatformByPlatformResponse, DeleteNodesByIdData, DeleteNodesByIdError, GetNodesByIdData, GetNodesByIdEventsData, GetNodesByIdEventsError, GetNodesByIdEventsResponse, GetNodesByIdLogsData, PostNodesByIdRestartData, PostNodesByIdRestartError, PostNodesByIdRestartResponse, PostNodesByIdStartData, PostNodesByIdStartError, PostNodesByIdStartResponse, PostNodesByIdStopData, PostNodesByIdStopError, PostNodesByIdStopResponse, GetNotificationsProvidersData, PostNotificationsProvidersData, PostNotificationsProvidersError, PostNotificationsProvidersResponse, DeleteNotificationsProvidersByIdData, DeleteNotificationsProvidersByIdError, GetNotificationsProvidersByIdData, PutNotificationsProvidersByIdData, PutNotificationsProvidersByIdError, PutNotificationsProvidersByIdResponse, PostNotificationsProvidersByIdTestData, PostNotificationsProvidersByIdTestError, PostNotificationsProvidersByIdTestResponse, GetOrganizationsData, PostOrganizationsData, PostOrganizationsError, PostOrganizationsResponse, GetOrganizationsByMspidByMspidData, DeleteOrganizationsByIdData, DeleteOrganizationsByIdError, GetOrganizationsByIdData, PutOrganizationsByIdData, PutOrganizationsByIdError, PutOrganizationsByIdResponse } from '../types.gen'; -import { postAuthLogin, 
postAuthLogout, getAuthMe, getBackups, postBackups, getBackupsSchedules, postBackupsSchedules, deleteBackupsSchedulesById, getBackupsSchedulesById, putBackupsSchedulesById, putBackupsSchedulesByIdDisable, putBackupsSchedulesByIdEnable, getBackupsTargets, postBackupsTargets, deleteBackupsTargetsById, getBackupsTargetsById, putBackupsTargetsById, deleteBackupsById, getBackupsById, postDummy, getKeyProviders, postKeyProviders, deleteKeyProvidersById, getKeyProvidersById, getKeys, postKeys, getKeysAll, getKeysFilter, deleteKeysById, getKeysById, postKeysByKeyIdSign, getNetworksBesu, postNetworksBesu, postNetworksBesuImport, deleteNetworksBesuById, getNetworksBesuById, getNetworksFabric, postNetworksFabric, getNetworksFabricByNameByName, postNetworksFabricImport, postNetworksFabricImportWithOrg, deleteNetworksFabricById, getNetworksFabricById, postNetworksFabricByIdAnchorPeers, getNetworksFabricByIdChannelConfig, getNetworksFabricByIdCurrentChannelConfig, getNetworksFabricByIdNodes, postNetworksFabricByIdNodes, deleteNetworksFabricByIdOrderersByOrdererId, postNetworksFabricByIdOrderersByOrdererIdJoin, postNetworksFabricByIdOrderersByOrdererIdUnjoin, getNetworksFabricByIdOrganizationsByOrgIdConfig, deleteNetworksFabricByIdPeersByPeerId, postNetworksFabricByIdPeersByPeerIdJoin, postNetworksFabricByIdPeersByPeerIdUnjoin, postNetworksFabricByIdReloadBlock, getNodes, postNodes, getNodesDefaultsBesuNode, getNodesDefaultsFabric, getNodesDefaultsFabricOrderer, getNodesDefaultsFabricPeer, getNodesPlatformByPlatform, deleteNodesById, getNodesById, getNodesByIdEvents, getNodesByIdLogs, postNodesByIdRestart, postNodesByIdStart, postNodesByIdStop, getNotificationsProviders, postNotificationsProviders, deleteNotificationsProvidersById, getNotificationsProvidersById, putNotificationsProvidersById, postNotificationsProvidersByIdTest, getOrganizations, postOrganizations, getOrganizationsByMspidByMspid, deleteOrganizationsById, getOrganizationsById, putOrganizationsById, client } from 
'../sdk.gen'; +import type { PostAuthLoginData, PostAuthLoginError, PostAuthLoginResponse, PostAuthLogoutData, PostAuthLogoutError, PostAuthLogoutResponse, GetAuthMeData, GetBackupsData, PostBackupsData, PostBackupsError, PostBackupsResponse, GetBackupsSchedulesData, PostBackupsSchedulesData, PostBackupsSchedulesError, PostBackupsSchedulesResponse, DeleteBackupsSchedulesByIdData, DeleteBackupsSchedulesByIdError, GetBackupsSchedulesByIdData, PutBackupsSchedulesByIdData, PutBackupsSchedulesByIdError, PutBackupsSchedulesByIdResponse, PutBackupsSchedulesByIdDisableData, PutBackupsSchedulesByIdDisableError, PutBackupsSchedulesByIdDisableResponse, PutBackupsSchedulesByIdEnableData, PutBackupsSchedulesByIdEnableError, PutBackupsSchedulesByIdEnableResponse, GetBackupsTargetsData, PostBackupsTargetsData, PostBackupsTargetsError, PostBackupsTargetsResponse, DeleteBackupsTargetsByIdData, DeleteBackupsTargetsByIdError, GetBackupsTargetsByIdData, PutBackupsTargetsByIdData, PutBackupsTargetsByIdError, PutBackupsTargetsByIdResponse, DeleteBackupsByIdData, DeleteBackupsByIdError, GetBackupsByIdData, PostDummyData, PostDummyResponse, GetKeyProvidersData, PostKeyProvidersData, PostKeyProvidersError, PostKeyProvidersResponse, DeleteKeyProvidersByIdData, DeleteKeyProvidersByIdError, GetKeyProvidersByIdData, GetKeysData, GetKeysError, GetKeysResponse, PostKeysData, PostKeysError, PostKeysResponse, GetKeysAllData, GetKeysFilterData, GetKeysFilterError, GetKeysFilterResponse, DeleteKeysByIdData, DeleteKeysByIdError, GetKeysByIdData, PostKeysByKeyIdSignData, PostKeysByKeyIdSignError, PostKeysByKeyIdSignResponse, GetNetworksBesuData, GetNetworksBesuError, GetNetworksBesuResponse, PostNetworksBesuData, PostNetworksBesuError, PostNetworksBesuResponse, PostNetworksBesuImportData, PostNetworksBesuImportError, PostNetworksBesuImportResponse, DeleteNetworksBesuByIdData, DeleteNetworksBesuByIdError, GetNetworksBesuByIdData, GetNetworksFabricData, GetNetworksFabricError, GetNetworksFabricResponse, 
PostNetworksFabricData, PostNetworksFabricError, PostNetworksFabricResponse, GetNetworksFabricByNameByNameData, PostNetworksFabricImportData, PostNetworksFabricImportError, PostNetworksFabricImportResponse, PostNetworksFabricImportWithOrgData, PostNetworksFabricImportWithOrgError, PostNetworksFabricImportWithOrgResponse, DeleteNetworksFabricByIdData, DeleteNetworksFabricByIdError, GetNetworksFabricByIdData, PostNetworksFabricByIdAnchorPeersData, PostNetworksFabricByIdAnchorPeersError, PostNetworksFabricByIdAnchorPeersResponse, GetNetworksFabricByIdBlocksData, GetNetworksFabricByIdBlocksError, GetNetworksFabricByIdBlocksResponse, GetNetworksFabricByIdBlocksByBlockNumData, GetNetworksFabricByIdChannelConfigData, GetNetworksFabricByIdCurrentChannelConfigData, GetNetworksFabricByIdInfoData, GetNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesError, PostNetworksFabricByIdNodesResponse, DeleteNetworksFabricByIdOrderersByOrdererIdData, DeleteNetworksFabricByIdOrderersByOrdererIdError, DeleteNetworksFabricByIdOrderersByOrdererIdResponse, PostNetworksFabricByIdOrderersByOrdererIdJoinData, PostNetworksFabricByIdOrderersByOrdererIdJoinError, PostNetworksFabricByIdOrderersByOrdererIdJoinResponse, PostNetworksFabricByIdOrderersByOrdererIdUnjoinData, PostNetworksFabricByIdOrderersByOrdererIdUnjoinError, PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse, PostNetworksFabricByIdOrganizationCrlData, PostNetworksFabricByIdOrganizationCrlError, PostNetworksFabricByIdOrganizationCrlResponse, GetNetworksFabricByIdOrganizationsByOrgIdConfigData, DeleteNetworksFabricByIdPeersByPeerIdData, DeleteNetworksFabricByIdPeersByPeerIdError, DeleteNetworksFabricByIdPeersByPeerIdResponse, PostNetworksFabricByIdPeersByPeerIdJoinData, PostNetworksFabricByIdPeersByPeerIdJoinError, PostNetworksFabricByIdPeersByPeerIdJoinResponse, PostNetworksFabricByIdPeersByPeerIdUnjoinData, PostNetworksFabricByIdPeersByPeerIdUnjoinError, 
PostNetworksFabricByIdPeersByPeerIdUnjoinResponse, PostNetworksFabricByIdReloadBlockData, PostNetworksFabricByIdReloadBlockError, PostNetworksFabricByIdReloadBlockResponse, GetNetworksFabricByIdTransactionsByTxIdData, PostNetworksFabricByIdUpdateConfigData, PostNetworksFabricByIdUpdateConfigError, PostNetworksFabricByIdUpdateConfigResponse, GetNodesData, GetNodesError, GetNodesResponse, PostNodesData, PostNodesError, PostNodesResponse, GetNodesDefaultsBesuNodeData, GetNodesDefaultsFabricData, GetNodesDefaultsFabricOrdererData, GetNodesDefaultsFabricPeerData, GetNodesPlatformByPlatformData, GetNodesPlatformByPlatformError, GetNodesPlatformByPlatformResponse, DeleteNodesByIdData, DeleteNodesByIdError, GetNodesByIdData, PutNodesByIdData, PutNodesByIdError, PutNodesByIdResponse, PostNodesByIdCertificatesRenewData, PostNodesByIdCertificatesRenewError, PostNodesByIdCertificatesRenewResponse, GetNodesByIdChannelsData, GetNodesByIdEventsData, GetNodesByIdEventsError, GetNodesByIdEventsResponse, GetNodesByIdLogsData, PostNodesByIdRestartData, PostNodesByIdRestartError, PostNodesByIdRestartResponse, PostNodesByIdStartData, PostNodesByIdStartError, PostNodesByIdStartResponse, PostNodesByIdStopData, PostNodesByIdStopError, PostNodesByIdStopResponse, GetNotificationsProvidersData, PostNotificationsProvidersData, PostNotificationsProvidersError, PostNotificationsProvidersResponse, DeleteNotificationsProvidersByIdData, DeleteNotificationsProvidersByIdError, GetNotificationsProvidersByIdData, PutNotificationsProvidersByIdData, PutNotificationsProvidersByIdError, PutNotificationsProvidersByIdResponse, PostNotificationsProvidersByIdTestData, PostNotificationsProvidersByIdTestError, PostNotificationsProvidersByIdTestResponse, GetOrganizationsData, PostOrganizationsData, PostOrganizationsError, PostOrganizationsResponse, GetOrganizationsByMspidByMspidData, DeleteOrganizationsByIdData, DeleteOrganizationsByIdError, GetOrganizationsByIdData, PutOrganizationsByIdData, 
PutOrganizationsByIdError, PutOrganizationsByIdResponse, GetOrganizationsByIdCrlData, PostOrganizationsByIdCrlRevokePemData, PostOrganizationsByIdCrlRevokePemError, PostOrganizationsByIdCrlRevokePemResponse, DeleteOrganizationsByIdCrlRevokeSerialData, DeleteOrganizationsByIdCrlRevokeSerialError, DeleteOrganizationsByIdCrlRevokeSerialResponse, PostOrganizationsByIdCrlRevokeSerialData, PostOrganizationsByIdCrlRevokeSerialError, PostOrganizationsByIdCrlRevokeSerialResponse, GetOrganizationsByIdRevokedCertificatesData, GetSettingsData, PostSettingsData, PostSettingsResponse } from '../types.gen'; +import { postAuthLogin, postAuthLogout, getAuthMe, getBackups, postBackups, getBackupsSchedules, postBackupsSchedules, deleteBackupsSchedulesById, getBackupsSchedulesById, putBackupsSchedulesById, putBackupsSchedulesByIdDisable, putBackupsSchedulesByIdEnable, getBackupsTargets, postBackupsTargets, deleteBackupsTargetsById, getBackupsTargetsById, putBackupsTargetsById, deleteBackupsById, getBackupsById, postDummy, getKeyProviders, postKeyProviders, deleteKeyProvidersById, getKeyProvidersById, getKeys, postKeys, getKeysAll, getKeysFilter, deleteKeysById, getKeysById, postKeysByKeyIdSign, getNetworksBesu, postNetworksBesu, postNetworksBesuImport, deleteNetworksBesuById, getNetworksBesuById, getNetworksFabric, postNetworksFabric, getNetworksFabricByNameByName, postNetworksFabricImport, postNetworksFabricImportWithOrg, deleteNetworksFabricById, getNetworksFabricById, postNetworksFabricByIdAnchorPeers, getNetworksFabricByIdBlocks, getNetworksFabricByIdBlocksByBlockNum, getNetworksFabricByIdChannelConfig, getNetworksFabricByIdCurrentChannelConfig, getNetworksFabricByIdInfo, getNetworksFabricByIdNodes, postNetworksFabricByIdNodes, deleteNetworksFabricByIdOrderersByOrdererId, postNetworksFabricByIdOrderersByOrdererIdJoin, postNetworksFabricByIdOrderersByOrdererIdUnjoin, postNetworksFabricByIdOrganizationCrl, getNetworksFabricByIdOrganizationsByOrgIdConfig, 
deleteNetworksFabricByIdPeersByPeerId, postNetworksFabricByIdPeersByPeerIdJoin, postNetworksFabricByIdPeersByPeerIdUnjoin, postNetworksFabricByIdReloadBlock, getNetworksFabricByIdTransactionsByTxId, postNetworksFabricByIdUpdateConfig, getNodes, postNodes, getNodesDefaultsBesuNode, getNodesDefaultsFabric, getNodesDefaultsFabricOrderer, getNodesDefaultsFabricPeer, getNodesPlatformByPlatform, deleteNodesById, getNodesById, putNodesById, postNodesByIdCertificatesRenew, getNodesByIdChannels, getNodesByIdEvents, getNodesByIdLogs, postNodesByIdRestart, postNodesByIdStart, postNodesByIdStop, getNotificationsProviders, postNotificationsProviders, deleteNotificationsProvidersById, getNotificationsProvidersById, putNotificationsProvidersById, postNotificationsProvidersByIdTest, getOrganizations, postOrganizations, getOrganizationsByMspidByMspid, deleteOrganizationsById, getOrganizationsById, putOrganizationsById, getOrganizationsByIdCrl, postOrganizationsByIdCrlRevokePem, deleteOrganizationsByIdCrlRevokeSerial, postOrganizationsByIdCrlRevokeSerial, getOrganizationsByIdRevokedCertificates, getSettings, postSettings, client } from '../sdk.gen'; type QueryKey = [ Pick & { @@ -1164,6 +1164,72 @@ export const postNetworksFabricByIdAnchorPeersMutation = (options?: Partial) => [ + createQueryKey('getNetworksFabricByIdBlocks', options) +]; + +export const getNetworksFabricByIdBlocksOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await getNetworksFabricByIdBlocks({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: getNetworksFabricByIdBlocksQueryKey(options) + }); +}; + +export const getNetworksFabricByIdBlocksInfiniteQueryKey = (options: Options): QueryKey> => [ + createQueryKey('getNetworksFabricByIdBlocks', options, true) +]; + +export const getNetworksFabricByIdBlocksInfiniteOptions = (options: Options) => { + return infiniteQueryOptions, QueryKey>, number | 
Pick>[0], 'body' | 'headers' | 'path' | 'query'>>( + // @ts-ignore + { + queryFn: async ({ pageParam, queryKey, signal }) => { + // @ts-ignore + const page: Pick>[0], 'body' | 'headers' | 'path' | 'query'> = typeof pageParam === 'object' ? pageParam : { + query: { + offset: pageParam + } + }; + const params = createInfiniteParams(queryKey, page); + const { data } = await getNetworksFabricByIdBlocks({ + ...options, + ...params, + signal, + throwOnError: true + }); + return data; + }, + queryKey: getNetworksFabricByIdBlocksInfiniteQueryKey(options) + }); +}; + +export const getNetworksFabricByIdBlocksByBlockNumQueryKey = (options: Options) => [ + createQueryKey('getNetworksFabricByIdBlocksByBlockNum', options) +]; + +export const getNetworksFabricByIdBlocksByBlockNumOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await getNetworksFabricByIdBlocksByBlockNum({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: getNetworksFabricByIdBlocksByBlockNumQueryKey(options) + }); +}; + export const getNetworksFabricByIdChannelConfigQueryKey = (options: Options) => [ createQueryKey('getNetworksFabricByIdChannelConfig', options) ]; @@ -1202,6 +1268,25 @@ export const getNetworksFabricByIdCurrentChannelConfigOptions = (options: Option }); }; +export const getNetworksFabricByIdInfoQueryKey = (options: Options) => [ + createQueryKey('getNetworksFabricByIdInfo', options) +]; + +export const getNetworksFabricByIdInfoOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await getNetworksFabricByIdInfo({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: getNetworksFabricByIdInfoQueryKey(options) + }); +}; + export const getNetworksFabricByIdNodesQueryKey = (options: Options) => [ createQueryKey('getNetworksFabricByIdNodes', options) ]; @@ -1334,6 
+1419,39 @@ export const postNetworksFabricByIdOrderersByOrdererIdUnjoinMutation = (options? return mutationOptions; }; +export const postNetworksFabricByIdOrganizationCrlQueryKey = (options: Options) => [ + createQueryKey('postNetworksFabricByIdOrganizationCrl', options) +]; + +export const postNetworksFabricByIdOrganizationCrlOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await postNetworksFabricByIdOrganizationCrl({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: postNetworksFabricByIdOrganizationCrlQueryKey(options) + }); +}; + +export const postNetworksFabricByIdOrganizationCrlMutation = (options?: Partial>) => { + const mutationOptions: UseMutationOptions> = { + mutationFn: async (localOptions) => { + const { data } = await postNetworksFabricByIdOrganizationCrl({ + ...options, + ...localOptions, + throwOnError: true + }); + return data; + } + }; + return mutationOptions; +}; + export const getNetworksFabricByIdOrganizationsByOrgIdConfigQueryKey = (options: Options) => [ createQueryKey('getNetworksFabricByIdOrganizationsByOrgIdConfig', options) ]; @@ -1466,6 +1584,58 @@ export const postNetworksFabricByIdReloadBlockMutation = (options?: Partial) => [ + createQueryKey('getNetworksFabricByIdTransactionsByTxId', options) +]; + +export const getNetworksFabricByIdTransactionsByTxIdOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await getNetworksFabricByIdTransactionsByTxId({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: getNetworksFabricByIdTransactionsByTxIdQueryKey(options) + }); +}; + +export const postNetworksFabricByIdUpdateConfigQueryKey = (options: Options) => [ + createQueryKey('postNetworksFabricByIdUpdateConfig', options) +]; + +export const postNetworksFabricByIdUpdateConfigOptions = (options: Options) 
=> { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await postNetworksFabricByIdUpdateConfig({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: postNetworksFabricByIdUpdateConfigQueryKey(options) + }); +}; + +export const postNetworksFabricByIdUpdateConfigMutation = (options?: Partial>) => { + const mutationOptions: UseMutationOptions> = { + mutationFn: async (localOptions) => { + const { data } = await postNetworksFabricByIdUpdateConfig({ + ...options, + ...localOptions, + throwOnError: true + }); + return data; + } + }; + return mutationOptions; +}; + export const getNodesQueryKey = (options?: Options) => [ createQueryKey('getNodes', options) ]; @@ -1702,6 +1872,72 @@ export const getNodesByIdOptions = (options: Options) => { }); }; +export const putNodesByIdMutation = (options?: Partial>) => { + const mutationOptions: UseMutationOptions> = { + mutationFn: async (localOptions) => { + const { data } = await putNodesById({ + ...options, + ...localOptions, + throwOnError: true + }); + return data; + } + }; + return mutationOptions; +}; + +export const postNodesByIdCertificatesRenewQueryKey = (options: Options) => [ + createQueryKey('postNodesByIdCertificatesRenew', options) +]; + +export const postNodesByIdCertificatesRenewOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await postNodesByIdCertificatesRenew({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: postNodesByIdCertificatesRenewQueryKey(options) + }); +}; + +export const postNodesByIdCertificatesRenewMutation = (options?: Partial>) => { + const mutationOptions: UseMutationOptions> = { + mutationFn: async (localOptions) => { + const { data } = await postNodesByIdCertificatesRenew({ + ...options, + ...localOptions, + throwOnError: true + }); + return data; + } + }; + return mutationOptions; +}; 
+ +export const getNodesByIdChannelsQueryKey = (options: Options) => [ + createQueryKey('getNodesByIdChannels', options) +]; + +export const getNodesByIdChannelsOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await getNodesByIdChannels({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: getNodesByIdChannelsQueryKey(options) + }); +}; + export const getNodesByIdEventsQueryKey = (options: Options) => [ createQueryKey('getNodesByIdEvents', options) ]; @@ -2115,4 +2351,174 @@ export const putOrganizationsByIdMutation = (options?: Partial) => [ + createQueryKey('getOrganizationsByIdCrl', options) +]; + +export const getOrganizationsByIdCrlOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await getOrganizationsByIdCrl({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: getOrganizationsByIdCrlQueryKey(options) + }); +}; + +export const postOrganizationsByIdCrlRevokePemQueryKey = (options: Options) => [ + createQueryKey('postOrganizationsByIdCrlRevokePem', options) +]; + +export const postOrganizationsByIdCrlRevokePemOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await postOrganizationsByIdCrlRevokePem({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: postOrganizationsByIdCrlRevokePemQueryKey(options) + }); +}; + +export const postOrganizationsByIdCrlRevokePemMutation = (options?: Partial>) => { + const mutationOptions: UseMutationOptions> = { + mutationFn: async (localOptions) => { + const { data } = await postOrganizationsByIdCrlRevokePem({ + ...options, + ...localOptions, + throwOnError: true + }); + return data; + } + }; + return mutationOptions; +}; + +export const 
deleteOrganizationsByIdCrlRevokeSerialMutation = (options?: Partial>) => { + const mutationOptions: UseMutationOptions> = { + mutationFn: async (localOptions) => { + const { data } = await deleteOrganizationsByIdCrlRevokeSerial({ + ...options, + ...localOptions, + throwOnError: true + }); + return data; + } + }; + return mutationOptions; +}; + +export const postOrganizationsByIdCrlRevokeSerialQueryKey = (options: Options) => [ + createQueryKey('postOrganizationsByIdCrlRevokeSerial', options) +]; + +export const postOrganizationsByIdCrlRevokeSerialOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await postOrganizationsByIdCrlRevokeSerial({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: postOrganizationsByIdCrlRevokeSerialQueryKey(options) + }); +}; + +export const postOrganizationsByIdCrlRevokeSerialMutation = (options?: Partial>) => { + const mutationOptions: UseMutationOptions> = { + mutationFn: async (localOptions) => { + const { data } = await postOrganizationsByIdCrlRevokeSerial({ + ...options, + ...localOptions, + throwOnError: true + }); + return data; + } + }; + return mutationOptions; +}; + +export const getOrganizationsByIdRevokedCertificatesQueryKey = (options: Options) => [ + createQueryKey('getOrganizationsByIdRevokedCertificates', options) +]; + +export const getOrganizationsByIdRevokedCertificatesOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await getOrganizationsByIdRevokedCertificates({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: getOrganizationsByIdRevokedCertificatesQueryKey(options) + }); +}; + +export const getSettingsQueryKey = (options?: Options) => [ + createQueryKey('getSettings', options) +]; + +export const getSettingsOptions = (options?: Options) => { + return queryOptions({ + 
queryFn: async ({ queryKey, signal }) => { + const { data } = await getSettings({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: getSettingsQueryKey(options) + }); +}; + +export const postSettingsQueryKey = (options: Options) => [ + createQueryKey('postSettings', options) +]; + +export const postSettingsOptions = (options: Options) => { + return queryOptions({ + queryFn: async ({ queryKey, signal }) => { + const { data } = await postSettings({ + ...options, + ...queryKey[0], + signal, + throwOnError: true + }); + return data; + }, + queryKey: postSettingsQueryKey(options) + }); +}; + +export const postSettingsMutation = (options?: Partial>) => { + const mutationOptions: UseMutationOptions> = { + mutationFn: async (localOptions) => { + const { data } = await postSettings({ + ...options, + ...localOptions, + throwOnError: true + }); + return data; + } + }; + return mutationOptions; }; \ No newline at end of file diff --git a/web/src/api/client/sdk.gen.ts b/web/src/api/client/sdk.gen.ts index 292423e..c0be9a8 100644 --- a/web/src/api/client/sdk.gen.ts +++ b/web/src/api/client/sdk.gen.ts @@ -1,7 +1,7 @@ // This file is auto-generated by @hey-api/openapi-ts import { createClient, createConfig, type Options } from '@hey-api/client-fetch'; -import type { PostAuthLoginData, PostAuthLoginResponse, PostAuthLoginError, PostAuthLogoutData, PostAuthLogoutResponse, PostAuthLogoutError, GetAuthMeData, GetAuthMeResponse, GetAuthMeError, GetBackupsData, GetBackupsResponse, GetBackupsError, PostBackupsData, PostBackupsResponse, PostBackupsError, GetBackupsSchedulesData, GetBackupsSchedulesResponse, GetBackupsSchedulesError, PostBackupsSchedulesData, PostBackupsSchedulesResponse, PostBackupsSchedulesError, DeleteBackupsSchedulesByIdData, DeleteBackupsSchedulesByIdError, GetBackupsSchedulesByIdData, GetBackupsSchedulesByIdResponse, GetBackupsSchedulesByIdError, PutBackupsSchedulesByIdData, PutBackupsSchedulesByIdResponse, 
PutBackupsSchedulesByIdError, PutBackupsSchedulesByIdDisableData, PutBackupsSchedulesByIdDisableResponse, PutBackupsSchedulesByIdDisableError, PutBackupsSchedulesByIdEnableData, PutBackupsSchedulesByIdEnableResponse, PutBackupsSchedulesByIdEnableError, GetBackupsTargetsData, GetBackupsTargetsResponse, GetBackupsTargetsError, PostBackupsTargetsData, PostBackupsTargetsResponse, PostBackupsTargetsError, DeleteBackupsTargetsByIdData, DeleteBackupsTargetsByIdError, GetBackupsTargetsByIdData, GetBackupsTargetsByIdResponse, GetBackupsTargetsByIdError, PutBackupsTargetsByIdData, PutBackupsTargetsByIdResponse, PutBackupsTargetsByIdError, DeleteBackupsByIdData, DeleteBackupsByIdError, GetBackupsByIdData, GetBackupsByIdResponse, GetBackupsByIdError, PostDummyData, PostDummyResponse, GetKeyProvidersData, GetKeyProvidersResponse, GetKeyProvidersError, PostKeyProvidersData, PostKeyProvidersResponse, PostKeyProvidersError, DeleteKeyProvidersByIdData, DeleteKeyProvidersByIdError, GetKeyProvidersByIdData, GetKeyProvidersByIdResponse, GetKeyProvidersByIdError, GetKeysData, GetKeysResponse, GetKeysError, PostKeysData, PostKeysResponse, PostKeysError, GetKeysAllData, GetKeysAllResponse, GetKeysAllError, GetKeysFilterData, GetKeysFilterResponse, GetKeysFilterError, DeleteKeysByIdData, DeleteKeysByIdError, GetKeysByIdData, GetKeysByIdResponse, GetKeysByIdError, PostKeysByKeyIdSignData, PostKeysByKeyIdSignResponse, PostKeysByKeyIdSignError, GetNetworksBesuData, GetNetworksBesuResponse, GetNetworksBesuError, PostNetworksBesuData, PostNetworksBesuResponse, PostNetworksBesuError, PostNetworksBesuImportData, PostNetworksBesuImportResponse, PostNetworksBesuImportError, DeleteNetworksBesuByIdData, DeleteNetworksBesuByIdError, GetNetworksBesuByIdData, GetNetworksBesuByIdResponse, GetNetworksBesuByIdError, GetNetworksFabricData, GetNetworksFabricResponse, GetNetworksFabricError, PostNetworksFabricData, PostNetworksFabricResponse, PostNetworksFabricError, GetNetworksFabricByNameByNameData, 
GetNetworksFabricByNameByNameResponse, GetNetworksFabricByNameByNameError, PostNetworksFabricImportData, PostNetworksFabricImportResponse, PostNetworksFabricImportError, PostNetworksFabricImportWithOrgData, PostNetworksFabricImportWithOrgResponse, PostNetworksFabricImportWithOrgError, DeleteNetworksFabricByIdData, DeleteNetworksFabricByIdError, GetNetworksFabricByIdData, GetNetworksFabricByIdResponse, GetNetworksFabricByIdError, PostNetworksFabricByIdAnchorPeersData, PostNetworksFabricByIdAnchorPeersResponse, PostNetworksFabricByIdAnchorPeersError, GetNetworksFabricByIdChannelConfigData, GetNetworksFabricByIdChannelConfigResponse, GetNetworksFabricByIdChannelConfigError, GetNetworksFabricByIdCurrentChannelConfigData, GetNetworksFabricByIdCurrentChannelConfigResponse, GetNetworksFabricByIdCurrentChannelConfigError, GetNetworksFabricByIdNodesData, GetNetworksFabricByIdNodesResponse, GetNetworksFabricByIdNodesError, PostNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesResponse, PostNetworksFabricByIdNodesError, DeleteNetworksFabricByIdOrderersByOrdererIdData, DeleteNetworksFabricByIdOrderersByOrdererIdResponse, DeleteNetworksFabricByIdOrderersByOrdererIdError, PostNetworksFabricByIdOrderersByOrdererIdJoinData, PostNetworksFabricByIdOrderersByOrdererIdJoinResponse, PostNetworksFabricByIdOrderersByOrdererIdJoinError, PostNetworksFabricByIdOrderersByOrdererIdUnjoinData, PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse, PostNetworksFabricByIdOrderersByOrdererIdUnjoinError, GetNetworksFabricByIdOrganizationsByOrgIdConfigData, GetNetworksFabricByIdOrganizationsByOrgIdConfigResponse, GetNetworksFabricByIdOrganizationsByOrgIdConfigError, DeleteNetworksFabricByIdPeersByPeerIdData, DeleteNetworksFabricByIdPeersByPeerIdResponse, DeleteNetworksFabricByIdPeersByPeerIdError, PostNetworksFabricByIdPeersByPeerIdJoinData, PostNetworksFabricByIdPeersByPeerIdJoinResponse, PostNetworksFabricByIdPeersByPeerIdJoinError, PostNetworksFabricByIdPeersByPeerIdUnjoinData, 
PostNetworksFabricByIdPeersByPeerIdUnjoinResponse, PostNetworksFabricByIdPeersByPeerIdUnjoinError, PostNetworksFabricByIdReloadBlockData, PostNetworksFabricByIdReloadBlockResponse, PostNetworksFabricByIdReloadBlockError, GetNodesData, GetNodesResponse, GetNodesError, PostNodesData, PostNodesResponse, PostNodesError, GetNodesDefaultsBesuNodeData, GetNodesDefaultsBesuNodeResponse, GetNodesDefaultsBesuNodeError, GetNodesDefaultsFabricData, GetNodesDefaultsFabricResponse, GetNodesDefaultsFabricError, GetNodesDefaultsFabricOrdererData, GetNodesDefaultsFabricOrdererResponse, GetNodesDefaultsFabricOrdererError, GetNodesDefaultsFabricPeerData, GetNodesDefaultsFabricPeerResponse, GetNodesDefaultsFabricPeerError, GetNodesPlatformByPlatformData, GetNodesPlatformByPlatformResponse, GetNodesPlatformByPlatformError, DeleteNodesByIdData, DeleteNodesByIdError, GetNodesByIdData, GetNodesByIdResponse, GetNodesByIdError, GetNodesByIdEventsData, GetNodesByIdEventsResponse, GetNodesByIdEventsError, GetNodesByIdLogsData, GetNodesByIdLogsResponse, GetNodesByIdLogsError, PostNodesByIdRestartData, PostNodesByIdRestartResponse, PostNodesByIdRestartError, PostNodesByIdStartData, PostNodesByIdStartResponse, PostNodesByIdStartError, PostNodesByIdStopData, PostNodesByIdStopResponse, PostNodesByIdStopError, GetNotificationsProvidersData, GetNotificationsProvidersResponse, GetNotificationsProvidersError, PostNotificationsProvidersData, PostNotificationsProvidersResponse, PostNotificationsProvidersError, DeleteNotificationsProvidersByIdData, DeleteNotificationsProvidersByIdError, GetNotificationsProvidersByIdData, GetNotificationsProvidersByIdResponse, GetNotificationsProvidersByIdError, PutNotificationsProvidersByIdData, PutNotificationsProvidersByIdResponse, PutNotificationsProvidersByIdError, PostNotificationsProvidersByIdTestData, PostNotificationsProvidersByIdTestResponse, PostNotificationsProvidersByIdTestError, GetOrganizationsData, GetOrganizationsResponse, GetOrganizationsError, 
PostOrganizationsData, PostOrganizationsResponse, PostOrganizationsError, GetOrganizationsByMspidByMspidData, GetOrganizationsByMspidByMspidResponse, GetOrganizationsByMspidByMspidError, DeleteOrganizationsByIdData, DeleteOrganizationsByIdError, GetOrganizationsByIdData, GetOrganizationsByIdResponse, GetOrganizationsByIdError, PutOrganizationsByIdData, PutOrganizationsByIdResponse, PutOrganizationsByIdError } from './types.gen'; +import type { PostAuthLoginData, PostAuthLoginResponse, PostAuthLoginError, PostAuthLogoutData, PostAuthLogoutResponse, PostAuthLogoutError, GetAuthMeData, GetAuthMeResponse, GetAuthMeError, GetBackupsData, GetBackupsResponse, GetBackupsError, PostBackupsData, PostBackupsResponse, PostBackupsError, GetBackupsSchedulesData, GetBackupsSchedulesResponse, GetBackupsSchedulesError, PostBackupsSchedulesData, PostBackupsSchedulesResponse, PostBackupsSchedulesError, DeleteBackupsSchedulesByIdData, DeleteBackupsSchedulesByIdError, GetBackupsSchedulesByIdData, GetBackupsSchedulesByIdResponse, GetBackupsSchedulesByIdError, PutBackupsSchedulesByIdData, PutBackupsSchedulesByIdResponse, PutBackupsSchedulesByIdError, PutBackupsSchedulesByIdDisableData, PutBackupsSchedulesByIdDisableResponse, PutBackupsSchedulesByIdDisableError, PutBackupsSchedulesByIdEnableData, PutBackupsSchedulesByIdEnableResponse, PutBackupsSchedulesByIdEnableError, GetBackupsTargetsData, GetBackupsTargetsResponse, GetBackupsTargetsError, PostBackupsTargetsData, PostBackupsTargetsResponse, PostBackupsTargetsError, DeleteBackupsTargetsByIdData, DeleteBackupsTargetsByIdError, GetBackupsTargetsByIdData, GetBackupsTargetsByIdResponse, GetBackupsTargetsByIdError, PutBackupsTargetsByIdData, PutBackupsTargetsByIdResponse, PutBackupsTargetsByIdError, DeleteBackupsByIdData, DeleteBackupsByIdError, GetBackupsByIdData, GetBackupsByIdResponse, GetBackupsByIdError, PostDummyData, PostDummyResponse, GetKeyProvidersData, GetKeyProvidersResponse, GetKeyProvidersError, PostKeyProvidersData, 
PostKeyProvidersResponse, PostKeyProvidersError, DeleteKeyProvidersByIdData, DeleteKeyProvidersByIdError, GetKeyProvidersByIdData, GetKeyProvidersByIdResponse, GetKeyProvidersByIdError, GetKeysData, GetKeysResponse, GetKeysError, PostKeysData, PostKeysResponse, PostKeysError, GetKeysAllData, GetKeysAllResponse, GetKeysAllError, GetKeysFilterData, GetKeysFilterResponse, GetKeysFilterError, DeleteKeysByIdData, DeleteKeysByIdError, GetKeysByIdData, GetKeysByIdResponse, GetKeysByIdError, PostKeysByKeyIdSignData, PostKeysByKeyIdSignResponse, PostKeysByKeyIdSignError, GetNetworksBesuData, GetNetworksBesuResponse, GetNetworksBesuError, PostNetworksBesuData, PostNetworksBesuResponse, PostNetworksBesuError, PostNetworksBesuImportData, PostNetworksBesuImportResponse, PostNetworksBesuImportError, DeleteNetworksBesuByIdData, DeleteNetworksBesuByIdError, GetNetworksBesuByIdData, GetNetworksBesuByIdResponse, GetNetworksBesuByIdError, GetNetworksFabricData, GetNetworksFabricResponse, GetNetworksFabricError, PostNetworksFabricData, PostNetworksFabricResponse, PostNetworksFabricError, GetNetworksFabricByNameByNameData, GetNetworksFabricByNameByNameResponse, GetNetworksFabricByNameByNameError, PostNetworksFabricImportData, PostNetworksFabricImportResponse, PostNetworksFabricImportError, PostNetworksFabricImportWithOrgData, PostNetworksFabricImportWithOrgResponse, PostNetworksFabricImportWithOrgError, DeleteNetworksFabricByIdData, DeleteNetworksFabricByIdError, GetNetworksFabricByIdData, GetNetworksFabricByIdResponse, GetNetworksFabricByIdError, PostNetworksFabricByIdAnchorPeersData, PostNetworksFabricByIdAnchorPeersResponse, PostNetworksFabricByIdAnchorPeersError, GetNetworksFabricByIdBlocksData, GetNetworksFabricByIdBlocksResponse, GetNetworksFabricByIdBlocksError, GetNetworksFabricByIdBlocksByBlockNumData, GetNetworksFabricByIdBlocksByBlockNumResponse, GetNetworksFabricByIdBlocksByBlockNumError, GetNetworksFabricByIdChannelConfigData, GetNetworksFabricByIdChannelConfigResponse, 
GetNetworksFabricByIdChannelConfigError, GetNetworksFabricByIdCurrentChannelConfigData, GetNetworksFabricByIdCurrentChannelConfigResponse, GetNetworksFabricByIdCurrentChannelConfigError, GetNetworksFabricByIdInfoData, GetNetworksFabricByIdInfoResponse, GetNetworksFabricByIdInfoError, GetNetworksFabricByIdNodesData, GetNetworksFabricByIdNodesResponse, GetNetworksFabricByIdNodesError, PostNetworksFabricByIdNodesData, PostNetworksFabricByIdNodesResponse, PostNetworksFabricByIdNodesError, DeleteNetworksFabricByIdOrderersByOrdererIdData, DeleteNetworksFabricByIdOrderersByOrdererIdResponse, DeleteNetworksFabricByIdOrderersByOrdererIdError, PostNetworksFabricByIdOrderersByOrdererIdJoinData, PostNetworksFabricByIdOrderersByOrdererIdJoinResponse, PostNetworksFabricByIdOrderersByOrdererIdJoinError, PostNetworksFabricByIdOrderersByOrdererIdUnjoinData, PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse, PostNetworksFabricByIdOrderersByOrdererIdUnjoinError, PostNetworksFabricByIdOrganizationCrlData, PostNetworksFabricByIdOrganizationCrlResponse, PostNetworksFabricByIdOrganizationCrlError, GetNetworksFabricByIdOrganizationsByOrgIdConfigData, GetNetworksFabricByIdOrganizationsByOrgIdConfigResponse, GetNetworksFabricByIdOrganizationsByOrgIdConfigError, DeleteNetworksFabricByIdPeersByPeerIdData, DeleteNetworksFabricByIdPeersByPeerIdResponse, DeleteNetworksFabricByIdPeersByPeerIdError, PostNetworksFabricByIdPeersByPeerIdJoinData, PostNetworksFabricByIdPeersByPeerIdJoinResponse, PostNetworksFabricByIdPeersByPeerIdJoinError, PostNetworksFabricByIdPeersByPeerIdUnjoinData, PostNetworksFabricByIdPeersByPeerIdUnjoinResponse, PostNetworksFabricByIdPeersByPeerIdUnjoinError, PostNetworksFabricByIdReloadBlockData, PostNetworksFabricByIdReloadBlockResponse, PostNetworksFabricByIdReloadBlockError, GetNetworksFabricByIdTransactionsByTxIdData, GetNetworksFabricByIdTransactionsByTxIdResponse, GetNetworksFabricByIdTransactionsByTxIdError, PostNetworksFabricByIdUpdateConfigData, 
PostNetworksFabricByIdUpdateConfigResponse, PostNetworksFabricByIdUpdateConfigError, GetNodesData, GetNodesResponse, GetNodesError, PostNodesData, PostNodesResponse, PostNodesError, GetNodesDefaultsBesuNodeData, GetNodesDefaultsBesuNodeResponse, GetNodesDefaultsBesuNodeError, GetNodesDefaultsFabricData, GetNodesDefaultsFabricResponse, GetNodesDefaultsFabricError, GetNodesDefaultsFabricOrdererData, GetNodesDefaultsFabricOrdererResponse, GetNodesDefaultsFabricOrdererError, GetNodesDefaultsFabricPeerData, GetNodesDefaultsFabricPeerResponse, GetNodesDefaultsFabricPeerError, GetNodesPlatformByPlatformData, GetNodesPlatformByPlatformResponse, GetNodesPlatformByPlatformError, DeleteNodesByIdData, DeleteNodesByIdError, GetNodesByIdData, GetNodesByIdResponse, GetNodesByIdError, PutNodesByIdData, PutNodesByIdResponse, PutNodesByIdError, PostNodesByIdCertificatesRenewData, PostNodesByIdCertificatesRenewResponse, PostNodesByIdCertificatesRenewError, GetNodesByIdChannelsData, GetNodesByIdChannelsResponse, GetNodesByIdChannelsError, GetNodesByIdEventsData, GetNodesByIdEventsResponse, GetNodesByIdEventsError, GetNodesByIdLogsData, GetNodesByIdLogsResponse, GetNodesByIdLogsError, PostNodesByIdRestartData, PostNodesByIdRestartResponse, PostNodesByIdRestartError, PostNodesByIdStartData, PostNodesByIdStartResponse, PostNodesByIdStartError, PostNodesByIdStopData, PostNodesByIdStopResponse, PostNodesByIdStopError, GetNotificationsProvidersData, GetNotificationsProvidersResponse, GetNotificationsProvidersError, PostNotificationsProvidersData, PostNotificationsProvidersResponse, PostNotificationsProvidersError, DeleteNotificationsProvidersByIdData, DeleteNotificationsProvidersByIdError, GetNotificationsProvidersByIdData, GetNotificationsProvidersByIdResponse, GetNotificationsProvidersByIdError, PutNotificationsProvidersByIdData, PutNotificationsProvidersByIdResponse, PutNotificationsProvidersByIdError, PostNotificationsProvidersByIdTestData, PostNotificationsProvidersByIdTestResponse, 
PostNotificationsProvidersByIdTestError, GetOrganizationsData, GetOrganizationsResponse, GetOrganizationsError, PostOrganizationsData, PostOrganizationsResponse, PostOrganizationsError, GetOrganizationsByMspidByMspidData, GetOrganizationsByMspidByMspidResponse, GetOrganizationsByMspidByMspidError, DeleteOrganizationsByIdData, DeleteOrganizationsByIdError, GetOrganizationsByIdData, GetOrganizationsByIdResponse, GetOrganizationsByIdError, PutOrganizationsByIdData, PutOrganizationsByIdResponse, PutOrganizationsByIdError, GetOrganizationsByIdCrlData, GetOrganizationsByIdCrlResponse, GetOrganizationsByIdCrlError, PostOrganizationsByIdCrlRevokePemData, PostOrganizationsByIdCrlRevokePemResponse, PostOrganizationsByIdCrlRevokePemError, DeleteOrganizationsByIdCrlRevokeSerialData, DeleteOrganizationsByIdCrlRevokeSerialResponse, DeleteOrganizationsByIdCrlRevokeSerialError, PostOrganizationsByIdCrlRevokeSerialData, PostOrganizationsByIdCrlRevokeSerialResponse, PostOrganizationsByIdCrlRevokeSerialError, GetOrganizationsByIdRevokedCertificatesData, GetOrganizationsByIdRevokedCertificatesResponse, GetOrganizationsByIdRevokedCertificatesError, GetSettingsData, GetSettingsResponse, PostSettingsData, PostSettingsResponse } from './types.gen'; export const client = createClient(createConfig()); @@ -555,6 +555,28 @@ export const postNetworksFabricByIdAnchorPeers = (options: Options) => { + return (options?.client ?? client).get({ + url: '/networks/fabric/{id}/blocks', + ...options + }); +}; + +/** + * Get transactions from a specific block + * Get all transactions from a specific block in a Fabric network + */ +export const getNetworksFabricByIdBlocksByBlockNum = (options: Options) => { + return (options?.client ?? 
client).get({ + url: '/networks/fabric/{id}/blocks/{blockNum}', + ...options + }); +}; + /** * Get Fabric network channel configuration * Retrieve the channel configuration for a Fabric network @@ -577,6 +599,17 @@ export const getNetworksFabricByIdCurrentChannelConfig = (options: Options) => { + return (options?.client ?? client).get({ + url: '/networks/fabric/{id}/info', + ...options + }); +}; + /** * Get network nodes * Get all nodes associated with a network @@ -636,6 +669,21 @@ export const postNetworksFabricByIdOrderersByOrdererIdUnjoin = (options: Options) => { + return (options?.client ?? client).post({ + url: '/networks/fabric/{id}/organization-crl', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options?.headers + } + }); +}; + /** * Get network configuration * Get the network configuration as YAML @@ -691,6 +739,43 @@ export const postNetworksFabricByIdReloadBlock = (options: Options) => { + return (options?.client ?? client).get({ + url: '/networks/fabric/{id}/transactions/{txId}', + ...options + }); +}; + +/** + * Prepare a config update for a Fabric network + * Prepare a config update proposal for a Fabric network using the provided operations. + * The following operation types are supported: + * - add_org: Add a new organization to the channel + * - remove_org: Remove an organization from the channel + * - update_org_msp: Update an organization's MSP configuration + * - set_anchor_peers: Set anchor peers for an organization + * - add_consenter: Add a new consenter to the orderer + * - remove_consenter: Remove a consenter from the orderer + * - update_consenter: Update a consenter in the orderer + * - update_etcd_raft_options: Update etcd raft options for the orderer + * - update_batch_size: Update batch size for the orderer + * - update_batch_timeout: Update batch timeout for the orderer + */ +export const postNetworksFabricByIdUpdateConfig = (options: Options) => { + return (options?.client ?? 
client).post({ + url: '/networks/fabric/{id}/update-config', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options?.headers + } + }); +}; + /** * List all nodes * Get a paginated list of nodes with optional platform filter @@ -794,6 +879,43 @@ export const getNodesById = (options: Opti }); }; +/** + * Update a node + * Updates an existing node's configuration based on its type + */ +export const putNodesById = (options: Options) => { + return (options?.client ?? client).put({ + url: '/nodes/{id}', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options?.headers + } + }); +}; + +/** + * Renew node certificates + * Renews the TLS and signing certificates for a Fabric node + */ +export const postNodesByIdCertificatesRenew = (options: Options) => { + return (options?.client ?? client).post({ + url: '/nodes/{id}/certificates/renew', + ...options + }); +}; + +/** + * Get channels for a Fabric node + * Retrieves all channels for a specific Fabric node + */ +export const getNodesByIdChannels = (options: Options) => { + return (options?.client ?? client).get({ + url: '/nodes/{id}/channels', + ...options + }); +}; + /** * Get node events * Get a paginated list of events for a specific node @@ -999,4 +1121,97 @@ export const putOrganizationsById = (optio ...options?.headers } }); +}; + +/** + * Get organization's CRL + * Get the current Certificate Revocation List for the organization + */ +export const getOrganizationsByIdCrl = (options: Options) => { + return (options?.client ?? client).get({ + url: '/organizations/{id}/crl', + ...options + }); +}; + +/** + * Revoke a certificate using PEM data + * Add a certificate to the organization's CRL using its PEM encoded data + */ +export const postOrganizationsByIdCrlRevokePem = (options: Options) => { + return (options?.client ?? 
client).post({ + url: '/organizations/{id}/crl/revoke/pem', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options?.headers + } + }); +}; + +/** + * Delete a revoked certificate using its serial number + * Remove a certificate from the organization's CRL using its serial number + */ +export const deleteOrganizationsByIdCrlRevokeSerial = (options: Options) => { + return (options?.client ?? client).delete({ + url: '/organizations/{id}/crl/revoke/serial', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options?.headers + } + }); +}; + +/** + * Revoke a certificate using its serial number + * Add a certificate to the organization's CRL using its serial number + */ +export const postOrganizationsByIdCrlRevokeSerial = (options: Options) => { + return (options?.client ?? client).post({ + url: '/organizations/{id}/crl/revoke/serial', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options?.headers + } + }); +}; + +/** + * Get organization's revoked certificates + * Get all revoked certificates for the organization + */ +export const getOrganizationsByIdRevokedCertificates = (options: Options) => { + return (options?.client ?? client).get({ + url: '/organizations/{id}/revoked-certificates', + ...options + }); +}; + +/** + * Get the default setting + * Get the default setting's details + */ +export const getSettings = (options?: Options) => { + return (options?.client ?? client).get({ + url: '/settings', + ...options + }); +}; + +/** + * Create or update the default setting + * Create or update the default setting with the provided configuration + */ +export const postSettings = (options: Options) => { + return (options?.client ?? 
client).post({ + url: '/settings', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options?.headers + } + }); }; \ No newline at end of file diff --git a/web/src/api/client/types.gen.ts b/web/src/api/client/types.gen.ts index bd6d738..d796ff4 100644 --- a/web/src/api/client/types.gen.ts +++ b/web/src/api/client/types.gen.ts @@ -48,10 +48,6 @@ export type AuthUserResponse = { username?: string; }; -export type CryptoX509ExtKeyUsage = 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13; - -export type CryptoX509KeyUsage = 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128 | 256; - export type GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse = { code?: number; error?: string; @@ -69,6 +65,13 @@ export type HandlerCreateOrganizationRequest = { providerId?: number; }; +export type HandlerDeleteRevokedCertificateRequest = { + /** + * Hex string of the serial number + */ + serialNumber?: string; +}; + export type HandlerOrganizationResponse = { createdAt?: string; description?: string; @@ -83,6 +86,28 @@ export type HandlerOrganizationResponse = { updatedAt?: string; }; +export type HandlerRevokeCertificateByPemRequest = { + /** + * PEM encoded certificate + */ + certificate?: string; + revocationReason?: number; +}; + +export type HandlerRevokeCertificateBySerialRequest = { + revocationReason?: number; + /** + * Hex string of the serial number + */ + serialNumber?: string; +}; + +export type HandlerRevokedCertificateResponse = { + reason?: number; + revocationTime?: string; + serialNumber?: string; +}; + export type HandlerUpdateOrganizationRequest = { description?: string; }; @@ -156,7 +181,7 @@ export type HttpBesuNetworkResponse = { config?: Array; createdAt?: string; description?: string; - genesisConfig?: Array; + genesisConfig?: string; id?: number; name?: string; platform?: string; @@ -164,6 +189,27 @@ export type HttpBesuNetworkResponse = { updatedAt?: string; }; +export type HttpBesuNodeDefaultsResponse = { + defaults?: Array; + 
nodeCount?: number; +}; + +export type HttpBlockListResponse = { + blocks?: Array; + total?: number; +}; + +export type HttpBlockTransactionsResponse = { + block?: ServiceBlock; + transactions?: Array; +}; + +export type HttpChainInfoResponse = { + currentBlockHash?: string; + height?: number; + previousBlockHash?: string; +}; + export type HttpChannelConfigResponse = { config?: { [key: string]: unknown; @@ -171,6 +217,61 @@ export type HttpChannelConfigResponse = { name?: string; }; +export type HttpChannelResponse = { + blockNum?: number; + createdAt?: string; + name?: string; +}; + +/** + * A single configuration update operation + */ +export type HttpConfigUpdateOperationRequest = { + /** + * Payload contains the operation-specific data + * The structure depends on the operation type: + * - add_org: AddOrgPayload + * - remove_org: RemoveOrgPayload + * - update_org_msp: UpdateOrgMSPPayload + * - set_anchor_peers: SetAnchorPeersPayload + * - add_consenter: AddConsenterPayload + * - remove_consenter: RemoveConsenterPayload + * - update_consenter: UpdateConsenterPayload + * - update_etcd_raft_options: UpdateEtcdRaftOptionsPayload + * - update_batch_size: UpdateBatchSizePayload + * - update_batch_timeout: UpdateBatchTimeoutPayload + * @Description The payload for the configuration update operation + * @Description Can be one of: + * @Description - AddOrgPayload when type is "add_org" + * @Description - RemoveOrgPayload when type is "remove_org" + * @Description - UpdateOrgMSPPayload when type is "update_org_msp" + * @Description - SetAnchorPeersPayload when type is "set_anchor_peers" + * @Description - AddConsenterPayload when type is "add_consenter" + * @Description - RemoveConsenterPayload when type is "remove_consenter" + * @Description - UpdateConsenterPayload when type is "update_consenter" + * @Description - UpdateEtcdRaftOptionsPayload when type is "update_etcd_raft_options" + * @Description - UpdateBatchSizePayload when type is "update_batch_size" + * 
@Description - UpdateBatchTimeoutPayload when type is "update_batch_timeout" + */ + payload: Array; + /** + * Type is the type of configuration update operation + * enum: add_org,remove_org,update_org_msp,set_anchor_peers,add_consenter,remove_consenter,update_consenter,update_etcd_raft_options,update_batch_size,update_batch_timeout + */ + type: 'add_org' | 'remove_org' | 'update_org_msp' | 'set_anchor_peers' | 'add_consenter' | 'remove_consenter' | 'update_consenter' | 'update_etcd_raft_options' | 'update_batch_size' | 'update_batch_timeout'; +}; + +export type HttpConfigUpdateResponse = { + channel_name?: string; + created_at?: string; + created_by?: string; + id?: string; + network_id?: number; + operations?: Array; + preview_json?: string; + status?: string; +}; + export type HttpConsenterConfig = { id: string; }; @@ -456,6 +557,11 @@ export type HttpNetworkResponse = { updatedAt?: string; }; +export type HttpNodeChannelsResponse = { + channels?: Array; + nodeId?: number; +}; + export type HttpNodeEventResponse = { created_at?: string; data?: unknown; @@ -468,6 +574,7 @@ export type HttpNodeResponse = { besuNode?: ServiceBesuNodeProperties; createdAt?: string; endpoint?: string; + errorMessage?: string; fabricOrderer?: ServiceFabricOrdererProperties; fabricPeer?: ServiceFabricPeerProperties; id?: number; @@ -550,6 +657,10 @@ export type HttpTestProviderResponse = { testedAt?: string; }; +export type HttpTransactionResponse = { + transaction?: ServiceTransaction; +}; + export type HttpUpdateBackupScheduleRequest = { cronExpression: string; description?: string; @@ -584,6 +695,20 @@ export type HttpUpdateBatchTimeoutPayload = { timeout: string; }; +export type HttpUpdateBesuNodeRequest = { + bootnodes?: Array; + env?: { + [key: string]: string; + }; + externalIp?: string; + internalIp?: string; + networkId: number; + p2pHost: string; + p2pPort: number; + rpcHost: string; + rpcPort: number; +}; + export type HttpUpdateConsenterPayload = { client_tls_cert: string; 
host: string; @@ -601,12 +726,64 @@ export type HttpUpdateEtcdRaftOptionsPayload = { tick_interval: string; }; +export type HttpUpdateFabricNetworkRequest = { + operations: Array; +}; + +export type HttpUpdateFabricOrdererRequest = { + adminAddress?: string; + domainNames?: Array; + env?: { + [key: string]: string; + }; + externalEndpoint?: string; + listenAddress?: string; + operationsListenAddress?: string; + version?: string; +}; + +export type HttpUpdateFabricPeerRequest = { + addressOverrides?: Array; + chaincodeAddress?: string; + domainNames?: Array; + env?: { + [key: string]: string; + }; + eventsAddress?: string; + externalEndpoint?: string; + listenAddress?: string; + operationsListenAddress?: string; + version?: string; +}; + +export type HttpUpdateNodeRequest = { + besuNode?: HttpUpdateBesuNodeRequest; + blockchainPlatform?: TypesBlockchainPlatform; + fabricOrderer?: HttpUpdateFabricOrdererRequest; + /** + * Platform-specific configurations + */ + fabricPeer?: HttpUpdateFabricPeerRequest; + /** + * Common fields + */ + name?: string; +}; + export type HttpUpdateOrgMspPayload = { msp_id: string; root_certs: Array; tls_root_certs: Array; }; +export type HttpUpdateOrganizationCrlRequest = { + organizationId: number; +}; + +export type HttpUpdateOrganizationCrlResponse = { + transactionId?: string; +}; + export type HttpUpdateProviderRequest = { config: unknown; isDefault?: boolean; @@ -623,10 +800,10 @@ export type ModelsCertificateRequest = { country?: Array; dnsNames?: Array; emailAddresses?: Array; - extKeyUsage?: Array; + extKeyUsage?: Array; ipAddresses?: Array>; isCA?: boolean; - keyUsage?: CryptoX509KeyUsage; + keyUsage?: X509KeyUsage; locality?: Array; organization?: Array; organizationalUnit?: Array; @@ -715,6 +892,7 @@ export type ModelsKeyResponse = { publicKey?: string; sha1Fingerprint?: string; sha256Fingerprint?: string; + signingKeyID?: number; status?: string; }; @@ -751,15 +929,20 @@ export type ResponseResponse = { }; export type 
ServiceBesuNodeDefaults = { - externalIP?: string; - internalIP?: string; + environmentVariables?: { + [key: string]: string; + }; + externalIp?: string; + internalIp?: string; mode?: ServiceMode; - networkId?: number; - p2pAddress?: string; - rpcAddress?: string; + p2pHost?: string; + p2pPort?: number; + rpcHost?: string; + rpcPort?: number; }; export type ServiceBesuNodeProperties = { + bootNodes?: Array; enodeUrl?: string; externalIp?: string; internalIp?: string; @@ -773,6 +956,20 @@ export type ServiceBesuNodeProperties = { p2pPort?: number; rpcHost?: string; rpcPort?: number; + version?: string; +}; + +export type ServiceBlock = { + data?: Array; + hash?: string; + number?: number; + previous_hash?: string; + timestamp?: string; + tx_count?: number; +}; + +export type ServiceCreateSettingParams = { + config?: ServiceSettingConfig; }; export type ServiceFabricOrdererProperties = { @@ -796,9 +993,11 @@ export type ServiceFabricOrdererProperties = { tlsCaCert?: string; tlsCert?: string; tlsKeyId?: number; + version?: string; }; export type ServiceFabricPeerProperties = { + addressOverrides?: Array; chaincodeAddress?: string; domainNames?: Array; eventsAddress?: string; @@ -820,6 +1019,7 @@ export type ServiceFabricPeerProperties = { tlsCaCert?: string; tlsCert?: string; tlsKeyId?: number; + version?: string; }; export type ServiceMode = 'service' | 'docker'; @@ -842,6 +1042,7 @@ export type ServiceNode = { */ deploymentConfig?: unknown; endpoint?: string; + errorMessage?: string; id?: number; mspId?: string; name?: string; @@ -876,6 +1077,34 @@ export type ServiceNodesDefaultsResult = { peers?: Array; }; +export type ServiceSetting = { + config?: ServiceSettingConfig; + created_at?: string; + id?: number; + updated_at?: string; +}; + +export type ServiceSettingConfig = { + besuTemplateCMD?: string; + ordererTemplateCMD?: string; + peerTemplateCMD?: string; +}; + +export type ServiceTransaction = { + block_number?: number; + creator?: string; + payload?: Array; + 
timestamp?: string; + tx_id?: string; + type?: string; +}; + +export type TypesAddressOverride = { + from?: string; + tlsCACert?: string; + to?: string; +}; + export type TypesBesuNodeConfig = { bootNodes?: Array; env?: { @@ -902,6 +1131,10 @@ export type TypesBesuNodeConfig = { export type TypesBlockchainPlatform = 'FABRIC' | 'BESU'; export type TypesFabricOrdererConfig = { + /** + * @Description Address overrides for the orderer + */ + addressOverrides?: Array; adminAddress?: string; domainNames?: Array; env?: { @@ -931,6 +1164,10 @@ export type TypesFabricOrdererConfig = { * Configuration for creating a new Fabric peer node */ export type TypesFabricPeerConfig = { + /** + * @Description Address overrides for the peer + */ + addressOverrides?: Array; /** * @Description Chaincode listen address */ @@ -973,6 +1210,10 @@ export type TypesFabricPeerConfig = { * @Description Operations listen address */ operationsListenAddress?: string; + /** + * @Description Orderer address overrides for the peer + */ + ordererAddressOverrides?: Array; /** * @Description Organization ID that owns this peer */ @@ -987,10 +1228,25 @@ export type TypesFabricPeerConfig = { version?: string; }; -export type TypesNodeStatus = 'PENDING' | 'RUNNING' | 'STOPPED' | 'STOPPING' | 'STARTING' | 'ERROR'; +export type TypesNodeStatus = 'PENDING' | 'RUNNING' | 'STOPPED' | 'STOPPING' | 'STARTING' | 'UPDATING' | 'ERROR'; export type TypesNodeType = 'FABRIC_PEER' | 'FABRIC_ORDERER' | 'BESU_FULLNODE'; +export type TypesOrdererAddressOverride = { + /** + * @Description Original orderer address + */ + from: string; + /** + * @Description TLS CA certificate in PEM format + */ + tlsCACert: string; + /** + * @Description New orderer address to use + */ + to: string; +}; + export type UrlUrl = { /** * append a query ('?') even if RawQuery is empty @@ -1039,6 +1295,10 @@ export type UrlUserinfo = { [key: string]: unknown; }; +export type X509ExtKeyUsage = 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 
13; + +export type X509KeyUsage = 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128 | 256; + export type PostAuthLoginData = { /** * Login credentials @@ -2235,9 +2495,9 @@ export type PostNetworksBesuError = PostNetworksBesuErrors[keyof PostNetworksBes export type PostNetworksBesuResponses = { /** - * Created + * OK */ - 201: HttpBesuNetworkResponse; + 200: HttpBesuNetworkResponse; }; export type PostNetworksBesuResponse = PostNetworksBesuResponses[keyof PostNetworksBesuResponses]; @@ -2631,6 +2891,99 @@ export type PostNetworksFabricByIdAnchorPeersResponses = { export type PostNetworksFabricByIdAnchorPeersResponse = PostNetworksFabricByIdAnchorPeersResponses[keyof PostNetworksFabricByIdAnchorPeersResponses]; +export type GetNetworksFabricByIdBlocksData = { + body?: never; + path: { + /** + * Network ID + */ + id: number; + }; + query?: { + /** + * Number of blocks to return (default: 10) + */ + limit?: number; + /** + * Number of blocks to skip (default: 0) + */ + offset?: number; + /** + * Get blocks in reverse order (default: false) + */ + reverse?: boolean; + }; + url: '/networks/fabric/{id}/blocks'; +}; + +export type GetNetworksFabricByIdBlocksErrors = { + /** + * Bad Request + */ + 400: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Not Found + */ + 404: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Internal Server Error + */ + 500: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; +}; + +export type GetNetworksFabricByIdBlocksError = GetNetworksFabricByIdBlocksErrors[keyof GetNetworksFabricByIdBlocksErrors]; + +export type GetNetworksFabricByIdBlocksResponses = { + /** + * OK + */ + 200: HttpBlockListResponse; +}; + +export type GetNetworksFabricByIdBlocksResponse = GetNetworksFabricByIdBlocksResponses[keyof GetNetworksFabricByIdBlocksResponses]; + +export type GetNetworksFabricByIdBlocksByBlockNumData = { + body?: never; + path: { + /** + * Network ID + */ + id: number; + /** + * Block Number + */ + blockNum: 
number; + }; + query?: never; + url: '/networks/fabric/{id}/blocks/{blockNum}'; +}; + +export type GetNetworksFabricByIdBlocksByBlockNumErrors = { + /** + * Bad Request + */ + 400: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Not Found + */ + 404: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Internal Server Error + */ + 500: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; +}; + +export type GetNetworksFabricByIdBlocksByBlockNumError = GetNetworksFabricByIdBlocksByBlockNumErrors[keyof GetNetworksFabricByIdBlocksByBlockNumErrors]; + +export type GetNetworksFabricByIdBlocksByBlockNumResponses = { + /** + * OK + */ + 200: HttpBlockTransactionsResponse; +}; + +export type GetNetworksFabricByIdBlocksByBlockNumResponse = GetNetworksFabricByIdBlocksByBlockNumResponses[keyof GetNetworksFabricByIdBlocksByBlockNumResponses]; + export type GetNetworksFabricByIdChannelConfigData = { body?: never; path: { @@ -2699,6 +3052,44 @@ export type GetNetworksFabricByIdCurrentChannelConfigResponses = { export type GetNetworksFabricByIdCurrentChannelConfigResponse = GetNetworksFabricByIdCurrentChannelConfigResponses[keyof GetNetworksFabricByIdCurrentChannelConfigResponses]; +export type GetNetworksFabricByIdInfoData = { + body?: never; + path: { + /** + * Network ID + */ + id: number; + }; + query?: never; + url: '/networks/fabric/{id}/info'; +}; + +export type GetNetworksFabricByIdInfoErrors = { + /** + * Bad Request + */ + 400: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Not Found + */ + 404: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Internal Server Error + */ + 500: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; +}; + +export type GetNetworksFabricByIdInfoError = GetNetworksFabricByIdInfoErrors[keyof GetNetworksFabricByIdInfoErrors]; + +export type GetNetworksFabricByIdInfoResponses = { + /** + * OK + */ + 200: HttpChainInfoResponse; +}; + +export type 
GetNetworksFabricByIdInfoResponse = GetNetworksFabricByIdInfoResponses[keyof GetNetworksFabricByIdInfoResponses]; + export type GetNetworksFabricByIdNodesData = { body?: never; path: { @@ -2888,23 +3279,22 @@ export type PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponses = { export type PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponse = PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponses[keyof PostNetworksFabricByIdOrderersByOrdererIdUnjoinResponses]; -export type GetNetworksFabricByIdOrganizationsByOrgIdConfigData = { - body?: never; +export type PostNetworksFabricByIdOrganizationCrlData = { + /** + * Organization CRL update request + */ + body: HttpUpdateOrganizationCrlRequest; path: { /** * Network ID */ id: number; - /** - * Organization ID - */ - orgId: number; }; query?: never; - url: '/networks/fabric/{id}/organizations/{orgId}/config'; + url: '/networks/fabric/{id}/organization-crl'; }; -export type GetNetworksFabricByIdOrganizationsByOrgIdConfigErrors = { +export type PostNetworksFabricByIdOrganizationCrlErrors = { /** * Bad Request */ @@ -2919,9 +3309,51 @@ export type GetNetworksFabricByIdOrganizationsByOrgIdConfigErrors = { 500: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; }; -export type GetNetworksFabricByIdOrganizationsByOrgIdConfigError = GetNetworksFabricByIdOrganizationsByOrgIdConfigErrors[keyof GetNetworksFabricByIdOrganizationsByOrgIdConfigErrors]; +export type PostNetworksFabricByIdOrganizationCrlError = PostNetworksFabricByIdOrganizationCrlErrors[keyof PostNetworksFabricByIdOrganizationCrlErrors]; -export type GetNetworksFabricByIdOrganizationsByOrgIdConfigResponses = { +export type PostNetworksFabricByIdOrganizationCrlResponses = { + /** + * OK + */ + 200: HttpUpdateOrganizationCrlResponse; +}; + +export type PostNetworksFabricByIdOrganizationCrlResponse = PostNetworksFabricByIdOrganizationCrlResponses[keyof PostNetworksFabricByIdOrganizationCrlResponses]; + +export type 
GetNetworksFabricByIdOrganizationsByOrgIdConfigData = { + body?: never; + path: { + /** + * Network ID + */ + id: number; + /** + * Organization ID + */ + orgId: number; + }; + query?: never; + url: '/networks/fabric/{id}/organizations/{orgId}/config'; +}; + +export type GetNetworksFabricByIdOrganizationsByOrgIdConfigErrors = { + /** + * Bad Request + */ + 400: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Not Found + */ + 404: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Internal Server Error + */ + 500: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; +}; + +export type GetNetworksFabricByIdOrganizationsByOrgIdConfigError = GetNetworksFabricByIdOrganizationsByOrgIdConfigErrors[keyof GetNetworksFabricByIdOrganizationsByOrgIdConfigErrors]; + +export type GetNetworksFabricByIdOrganizationsByOrgIdConfigResponses = { /** * Network configuration YAML */ @@ -3090,6 +3522,85 @@ export type PostNetworksFabricByIdReloadBlockResponses = { export type PostNetworksFabricByIdReloadBlockResponse = PostNetworksFabricByIdReloadBlockResponses[keyof PostNetworksFabricByIdReloadBlockResponses]; +export type GetNetworksFabricByIdTransactionsByTxIdData = { + body?: never; + path: { + /** + * Network ID + */ + id: number; + /** + * Transaction ID + */ + txId: string; + }; + query?: never; + url: '/networks/fabric/{id}/transactions/{txId}'; +}; + +export type GetNetworksFabricByIdTransactionsByTxIdErrors = { + /** + * Bad Request + */ + 400: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Not Found + */ + 404: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Internal Server Error + */ + 500: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; +}; + +export type GetNetworksFabricByIdTransactionsByTxIdError = GetNetworksFabricByIdTransactionsByTxIdErrors[keyof GetNetworksFabricByIdTransactionsByTxIdErrors]; + +export type GetNetworksFabricByIdTransactionsByTxIdResponses = { 
+ /** + * OK + */ + 200: HttpTransactionResponse; +}; + +export type GetNetworksFabricByIdTransactionsByTxIdResponse = GetNetworksFabricByIdTransactionsByTxIdResponses[keyof GetNetworksFabricByIdTransactionsByTxIdResponses]; + +export type PostNetworksFabricByIdUpdateConfigData = { + /** + * Config update operations + */ + body: HttpUpdateFabricNetworkRequest; + path: { + /** + * Network ID + */ + id: number; + }; + query?: never; + url: '/networks/fabric/{id}/update-config'; +}; + +export type PostNetworksFabricByIdUpdateConfigErrors = { + /** + * Bad Request + */ + 400: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; + /** + * Internal Server Error + */ + 500: GithubComChainlaunchChainlaunchPkgNetworksHttpErrorResponse; +}; + +export type PostNetworksFabricByIdUpdateConfigError = PostNetworksFabricByIdUpdateConfigErrors[keyof PostNetworksFabricByIdUpdateConfigErrors]; + +export type PostNetworksFabricByIdUpdateConfigResponses = { + /** + * OK + */ + 200: HttpConfigUpdateResponse; +}; + +export type PostNetworksFabricByIdUpdateConfigResponse = PostNetworksFabricByIdUpdateConfigResponses[keyof PostNetworksFabricByIdUpdateConfigResponses]; + export type GetNodesData = { body?: never; path?: never; @@ -3167,7 +3678,12 @@ export type PostNodesResponse = PostNodesResponses[keyof PostNodesResponses]; export type GetNodesDefaultsBesuNodeData = { body?: never; path?: never; - query?: never; + query?: { + /** + * Number of Besu nodes + */ + besuNodes?: number; + }; url: '/nodes/defaults/besu-node'; }; @@ -3184,7 +3700,7 @@ export type GetNodesDefaultsBesuNodeResponses = { /** * OK */ - 200: ServiceBesuNodeDefaults; + 200: Array; }; export type GetNodesDefaultsBesuNodeResponse = GetNodesDefaultsBesuNodeResponses[keyof GetNodesDefaultsBesuNodeResponses]; @@ -3398,6 +3914,123 @@ export type GetNodesByIdResponses = { export type GetNodesByIdResponse = GetNodesByIdResponses[keyof GetNodesByIdResponses]; +export type PutNodesByIdData = { + /** + * Update node 
request + */ + body: HttpUpdateNodeRequest; + path: { + /** + * Node ID + */ + id: number; + }; + query?: never; + url: '/nodes/{id}'; +}; + +export type PutNodesByIdErrors = { + /** + * Validation error + */ + 400: ResponseErrorResponse; + /** + * Node not found + */ + 404: ResponseErrorResponse; + /** + * Internal server error + */ + 500: ResponseErrorResponse; +}; + +export type PutNodesByIdError = PutNodesByIdErrors[keyof PutNodesByIdErrors]; + +export type PutNodesByIdResponses = { + /** + * OK + */ + 200: HttpNodeResponse; +}; + +export type PutNodesByIdResponse = PutNodesByIdResponses[keyof PutNodesByIdResponses]; + +export type PostNodesByIdCertificatesRenewData = { + body?: never; + path: { + /** + * Node ID + */ + id: number; + }; + query?: never; + url: '/nodes/{id}/certificates/renew'; +}; + +export type PostNodesByIdCertificatesRenewErrors = { + /** + * Validation error + */ + 400: ResponseErrorResponse; + /** + * Node not found + */ + 404: ResponseErrorResponse; + /** + * Internal server error + */ + 500: ResponseErrorResponse; +}; + +export type PostNodesByIdCertificatesRenewError = PostNodesByIdCertificatesRenewErrors[keyof PostNodesByIdCertificatesRenewErrors]; + +export type PostNodesByIdCertificatesRenewResponses = { + /** + * OK + */ + 200: HttpNodeResponse; +}; + +export type PostNodesByIdCertificatesRenewResponse = PostNodesByIdCertificatesRenewResponses[keyof PostNodesByIdCertificatesRenewResponses]; + +export type GetNodesByIdChannelsData = { + body?: never; + path: { + /** + * Node ID + */ + id: number; + }; + query?: never; + url: '/nodes/{id}/channels'; +}; + +export type GetNodesByIdChannelsErrors = { + /** + * Validation error + */ + 400: ResponseErrorResponse; + /** + * Node not found + */ + 404: ResponseErrorResponse; + /** + * Internal server error + */ + 500: ResponseErrorResponse; +}; + +export type GetNodesByIdChannelsError = GetNodesByIdChannelsErrors[keyof GetNodesByIdChannelsErrors]; + +export type GetNodesByIdChannelsResponses 
= { + /** + * OK + */ + 200: HttpNodeChannelsResponse; +}; + +export type GetNodesByIdChannelsResponse = GetNodesByIdChannelsResponses[keyof GetNodesByIdChannelsResponses]; + export type GetNodesByIdEventsData = { body?: never; path: { @@ -4035,4 +4668,250 @@ export type PutOrganizationsByIdResponses = { 200: HandlerOrganizationResponse; }; -export type PutOrganizationsByIdResponse = PutOrganizationsByIdResponses[keyof PutOrganizationsByIdResponses]; \ No newline at end of file +export type PutOrganizationsByIdResponse = PutOrganizationsByIdResponses[keyof PutOrganizationsByIdResponses]; + +export type GetOrganizationsByIdCrlData = { + body?: never; + path: { + /** + * Organization ID + */ + id: number; + }; + query?: never; + url: '/organizations/{id}/crl'; +}; + +export type GetOrganizationsByIdCrlErrors = { + /** + * Bad Request + */ + 400: { + [key: string]: string; + }; + /** + * Internal Server Error + */ + 500: { + [key: string]: string; + }; +}; + +export type GetOrganizationsByIdCrlError = GetOrganizationsByIdCrlErrors[keyof GetOrganizationsByIdCrlErrors]; + +export type GetOrganizationsByIdCrlResponses = { + /** + * PEM encoded CRL + */ + 200: string; +}; + +export type GetOrganizationsByIdCrlResponse = GetOrganizationsByIdCrlResponses[keyof GetOrganizationsByIdCrlResponses]; + +export type PostOrganizationsByIdCrlRevokePemData = { + /** + * Certificate revocation request + */ + body: HandlerRevokeCertificateByPemRequest; + path: { + /** + * Organization ID + */ + id: number; + }; + query?: never; + url: '/organizations/{id}/crl/revoke/pem'; +}; + +export type PostOrganizationsByIdCrlRevokePemErrors = { + /** + * Bad Request + */ + 400: { + [key: string]: string; + }; + /** + * Internal Server Error + */ + 500: { + [key: string]: string; + }; +}; + +export type PostOrganizationsByIdCrlRevokePemError = PostOrganizationsByIdCrlRevokePemErrors[keyof PostOrganizationsByIdCrlRevokePemErrors]; + +export type PostOrganizationsByIdCrlRevokePemResponses = { + /** 
+ * OK + */ + 200: { + [key: string]: string; + }; +}; + +export type PostOrganizationsByIdCrlRevokePemResponse = PostOrganizationsByIdCrlRevokePemResponses[keyof PostOrganizationsByIdCrlRevokePemResponses]; + +export type DeleteOrganizationsByIdCrlRevokeSerialData = { + /** + * Certificate deletion request + */ + body: HandlerDeleteRevokedCertificateRequest; + path: { + /** + * Organization ID + */ + id: number; + }; + query?: never; + url: '/organizations/{id}/crl/revoke/serial'; +}; + +export type DeleteOrganizationsByIdCrlRevokeSerialErrors = { + /** + * Bad Request + */ + 400: { + [key: string]: string; + }; + /** + * Not Found + */ + 404: { + [key: string]: string; + }; + /** + * Internal Server Error + */ + 500: { + [key: string]: string; + }; +}; + +export type DeleteOrganizationsByIdCrlRevokeSerialError = DeleteOrganizationsByIdCrlRevokeSerialErrors[keyof DeleteOrganizationsByIdCrlRevokeSerialErrors]; + +export type DeleteOrganizationsByIdCrlRevokeSerialResponses = { + /** + * OK + */ + 200: { + [key: string]: string; + }; +}; + +export type DeleteOrganizationsByIdCrlRevokeSerialResponse = DeleteOrganizationsByIdCrlRevokeSerialResponses[keyof DeleteOrganizationsByIdCrlRevokeSerialResponses]; + +export type PostOrganizationsByIdCrlRevokeSerialData = { + /** + * Certificate revocation request + */ + body: HandlerRevokeCertificateBySerialRequest; + path: { + /** + * Organization ID + */ + id: number; + }; + query?: never; + url: '/organizations/{id}/crl/revoke/serial'; +}; + +export type PostOrganizationsByIdCrlRevokeSerialErrors = { + /** + * Bad Request + */ + 400: { + [key: string]: string; + }; + /** + * Internal Server Error + */ + 500: { + [key: string]: string; + }; +}; + +export type PostOrganizationsByIdCrlRevokeSerialError = PostOrganizationsByIdCrlRevokeSerialErrors[keyof PostOrganizationsByIdCrlRevokeSerialErrors]; + +export type PostOrganizationsByIdCrlRevokeSerialResponses = { + /** + * OK + */ + 200: { + [key: string]: string; + }; +}; + 
+export type PostOrganizationsByIdCrlRevokeSerialResponse = PostOrganizationsByIdCrlRevokeSerialResponses[keyof PostOrganizationsByIdCrlRevokeSerialResponses]; + +export type GetOrganizationsByIdRevokedCertificatesData = { + body?: never; + path: { + /** + * Organization ID + */ + id: number; + }; + query?: never; + url: '/organizations/{id}/revoked-certificates'; +}; + +export type GetOrganizationsByIdRevokedCertificatesErrors = { + /** + * Bad Request + */ + 400: { + [key: string]: string; + }; + /** + * Internal Server Error + */ + 500: { + [key: string]: string; + }; +}; + +export type GetOrganizationsByIdRevokedCertificatesError = GetOrganizationsByIdRevokedCertificatesErrors[keyof GetOrganizationsByIdRevokedCertificatesErrors]; + +export type GetOrganizationsByIdRevokedCertificatesResponses = { + /** + * OK + */ + 200: Array; +}; + +export type GetOrganizationsByIdRevokedCertificatesResponse = GetOrganizationsByIdRevokedCertificatesResponses[keyof GetOrganizationsByIdRevokedCertificatesResponses]; + +export type GetSettingsData = { + body?: never; + path?: never; + query?: never; + url: '/settings'; +}; + +export type GetSettingsResponses = { + /** + * OK + */ + 200: ServiceSetting; +}; + +export type GetSettingsResponse = GetSettingsResponses[keyof GetSettingsResponses]; + +export type PostSettingsData = { + /** + * Setting configuration + */ + body: ServiceCreateSettingParams; + path?: never; + query?: never; + url: '/settings'; +}; + +export type PostSettingsResponses = { + /** + * OK + */ + 200: ServiceSetting; +}; + +export type PostSettingsResponse = PostSettingsResponses[keyof PostSettingsResponses]; \ No newline at end of file diff --git a/web/src/components/dashboard/Sidebar.tsx b/web/src/components/dashboard/Sidebar.tsx index c57277a..b6f3fdb 100644 --- a/web/src/components/dashboard/Sidebar.tsx +++ b/web/src/components/dashboard/Sidebar.tsx @@ -1,28 +1,16 @@ -import { Sidebar, SidebarContent, SidebarFooter, SidebarGroup, SidebarGroupLabel, 
SidebarHeader, SidebarMenu, SidebarMenuButton, SidebarMenuItem, useSidebar } from '@/components/ui/sidebar'; -import { - BadgeCheck, - Bell, - Building, - ChevronsUpDown, - DatabaseBackup, - Globe, - Key, - LogOut, - Network, - Server, - Share2 -} from 'lucide-react'; -('use client') +import { Sidebar, SidebarContent, SidebarFooter, SidebarGroup, SidebarGroupLabel, SidebarHeader, SidebarMenu, SidebarMenuButton, SidebarMenuItem, useSidebar } from '@/components/ui/sidebar' +import { BadgeCheck, Bell, Building, ChevronsUpDown, DatabaseBackup, FileText, Globe, Key, LogOut, Network, Server, Share2, Settings } from 'lucide-react' +;('use client') // import { Project } from '@/api/client' -import { useAuth } from '@/contexts/AuthContext'; +import { useAuth } from '@/contexts/AuthContext' // import { useProjects } from '@/contexts/ProjectsContext' -import { type LucideIcon } from 'lucide-react'; -import { Link, useLocation } from 'react-router-dom'; -import logo from '../../../public/logo.svg'; -import { Avatar, AvatarFallback } from '../ui/avatar'; -import { DropdownMenu, DropdownMenuContent, DropdownMenuGroup, DropdownMenuItem, DropdownMenuLabel, DropdownMenuSeparator, DropdownMenuTrigger } from '../ui/dropdown-menu'; -import { ProBadge } from '../pro/ProBadge'; +import { type LucideIcon } from 'lucide-react' +import { Link, useLocation } from 'react-router-dom' +import logo from '../../../public/logo.svg' +import { Avatar, AvatarFallback } from '../ui/avatar' +import { DropdownMenu, DropdownMenuContent, DropdownMenuGroup, DropdownMenuItem, DropdownMenuLabel, DropdownMenuSeparator, DropdownMenuTrigger } from '../ui/dropdown-menu' +import { ProBadge } from '../pro/ProBadge' type NavItem = { title: string @@ -76,6 +64,21 @@ const data = { url: '/settings/backups', icon: DatabaseBackup, }, + { + title: 'Settings', + url: '/settings/general', + icon: Settings, + }, + ], + }, + { + title: 'API', + items: [ + { + title: 'API Documentation', + url: '/docs', + icon: FileText, + 
}, ], }, { @@ -101,6 +104,7 @@ const data = { }, ], }, + // { // title: 'Decentralized Identity', // items: [ @@ -223,7 +227,7 @@ function NavUser() {
-{/* + {/* diff --git a/web/src/components/keys/key-item.tsx b/web/src/components/keys/key-item.tsx index cd30598..5525ba1 100644 --- a/web/src/components/keys/key-item.tsx +++ b/web/src/components/keys/key-item.tsx @@ -44,10 +44,7 @@ export function KeyItem({ keyResponse, onDelete, createdAt }: KeyItemProps) { {createdAt && ( {' '} - + Created {formatDistanceToNow(new Date(createdAt), { addSuffix: true })} @@ -115,6 +112,12 @@ export function KeyItem({ keyResponse, onDelete, createdAt }: KeyItemProps) { {keyResponse.sha256Fingerprint}
)} + {keyResponse.ethereumAddress && ( +
+ Ethereum Address: + {keyResponse.ethereumAddress} +
+ )} diff --git a/web/src/components/network-import/ImportNetworkForm.tsx b/web/src/components/network-import/ImportNetworkForm.tsx index c06b890..72b1332 100644 --- a/web/src/components/network-import/ImportNetworkForm.tsx +++ b/web/src/components/network-import/ImportNetworkForm.tsx @@ -60,7 +60,7 @@ type FormValues = z.infer export function ImportNetworkForm() { const [error, setError] = useState(null) const navigate = useNavigate() - const [fabricImportMethod, setFabricImportMethod] = useState<'genesis' | 'organization'>('genesis') + const [fabricImportMethod, setFabricImportMethod] = useState<'genesis' | 'organization'>('organization') const { data: organizations } = useQuery({ ...getOrganizationsOptions(), @@ -87,6 +87,13 @@ export function ImportNetworkForm() { toast.success('Network imported successfully') navigate('/networks') }, + onError: (error: Error) => { + const errorMessage = error.message || 'Failed to import Fabric network' + setError(errorMessage) + toast.error('Failed to import network', { + description: errorMessage, + }) + }, }) const importBesuNetwork = useMutation({ @@ -109,7 +116,7 @@ export function ImportNetworkForm() { defaultValues: { networkType: 'fabric', fabricImport: { - importMethod: 'genesis', + importMethod: 'organization', }, }, }) @@ -126,6 +133,7 @@ export function ImportNetworkForm() { setError(null) if (data.networkType === 'fabric') { + console.log('data.fabricImport', data.fabricImport) if (data.fabricImport.importMethod === 'genesis') { if (!data.fabricImport.genesisBlock) { setError('Genesis block is required') @@ -194,7 +202,7 @@ export function ImportNetworkForm() { } } - const isLoading = importFabricNetwork.isPending || importBesuNetwork.isPending + const isLoading = importFabricNetwork.isPending || importBesuNetwork.isPending || importFabricNetworkByOrg.isPending return ( @@ -241,22 +249,22 @@ export function ImportNetworkForm() {
Import Method handleImportMethodChange(value as 'genesis' | 'organization')} className="flex flex-col space-y-1" > - + - Import using genesis block + Import using organization, orderer URL and TLS certificate - + - Import using organization, orderer URL and TLS certificate + Import using genesis block
diff --git a/web/src/components/networks/FabricNetworkDetails.tsx b/web/src/components/networks/FabricNetworkDetails.tsx index f6cff02..5f85cf8 100644 --- a/web/src/components/networks/FabricNetworkDetails.tsx +++ b/web/src/components/networks/FabricNetworkDetails.tsx @@ -7,7 +7,9 @@ import { getOrganizationsOptions, postNetworksFabricByIdAnchorPeersMutation, postNetworksFabricByIdOrderersByOrdererIdJoinMutation, + postNetworksFabricByIdOrganizationCrlMutation, postNetworksFabricByIdPeersByPeerIdJoinMutation, + postNetworksFabricByIdUpdateConfigMutation, } from '@/api/client/@tanstack/react-query.gen' import { BesuIcon } from '@/components/icons/besu-icon' import { FabricIcon } from '@/components/icons/fabric-icon' @@ -25,7 +27,7 @@ import { Card } from '@/components/ui/card' import { Skeleton } from '@/components/ui/skeleton' import { TimeAgo } from '@/components/ui/time-ago' import { useMutation, useQuery } from '@tanstack/react-query' -import { Activity, AlertTriangle, Anchor, ArrowLeft, Check, Code, Copy, Network, Plus, Settings } from 'lucide-react' +import { Activity, AlertTriangle, Anchor, ArrowLeft, Check, Code, Copy, Network, Plus, Settings, Blocks, ShieldAlert, ArrowUpToLine, Loader2 } from 'lucide-react' import { useMemo, useState } from 'react' import ReactMarkdown from 'react-markdown' import { Link, useParams, useSearchParams } from 'react-router-dom' @@ -34,6 +36,24 @@ import { docco } from 'react-syntax-highlighter/dist/esm/styles/hljs' import rehypeRaw from 'rehype-raw' import { toast } from 'sonner' import { AddMultipleNodesDialog } from './add-multiple-nodes-dialog' +import { ChannelUpdateForm } from '../nodes/ChannelUpdateForm' +import { BlockExplorer } from './block-explorer' +import { useForm } from 'react-hook-form' +import { zodResolver } from '@hookform/resolvers/zod' +import * as z from 'zod' +import { + getOrganizationsByIdRevokedCertificatesOptions, + postOrganizationsByIdCrlRevokeSerialMutation, + 
postOrganizationsByIdCrlRevokePemMutation, + deleteOrganizationsByIdCrlRevokeSerialMutation, +} from '@/api/client/@tanstack/react-query.gen' +import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/select' +import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger } from '@/components/ui/dialog' +import { Form, FormControl, FormField, FormItem, FormLabel, FormMessage } from '@/components/ui/form' +import { AlertDialog, AlertDialogAction, AlertDialogCancel, AlertDialogContent, AlertDialogDescription, AlertDialogFooter, AlertDialogHeader, AlertDialogTitle } from '@/components/ui/alert-dialog' +import { Textarea } from '@/components/ui/textarea' +import { Input } from '@/components/ui/input' +import { Trash2 } from 'lucide-react' interface FabricNetworkDetailsProps { network: HttpNetworkResponse @@ -181,6 +201,332 @@ function CopyButton({ text }: { text: string }) { ) } +function CRLManagement({ network, organizations }: { network: HttpNetworkResponse; organizations: any[] }) { + const [selectedOrg, setSelectedOrg] = useState(null) + const { + data: crl, + refetch, + isLoading: isCrlLoading, + } = useQuery({ + ...getOrganizationsByIdRevokedCertificatesOptions({ + path: { id: selectedOrg! 
}, + }), + enabled: !!selectedOrg, + }) + + // Form for serial number + const serialForm = useForm<{ serialNumber: string }>({ + resolver: zodResolver( + z.object({ + serialNumber: z.string().min(1, 'Serial number is required'), + }) + ), + }) + + // Form for PEM + const pemForm = useForm<{ pem: string }>({ + resolver: zodResolver( + z.object({ + pem: z.string().min(1, 'PEM certificate is required'), + }) + ), + }) + + // Mutation for adding by serial number + const addBySerialMutation = useMutation({ + ...postOrganizationsByIdCrlRevokeSerialMutation(), + onSuccess: () => { + toast.success('Certificate revoked successfully') + refetch() + serialForm.reset() + setSerialDialogOpen(false) + }, + onError: (error: any) => { + if (error instanceof Error) { + toast.error(`Failed to revoke certificate: ${error.message}`) + } else if (error.error?.message) { + toast.error(`Failed to revoke certificate: ${error.error.message}`) + } else { + toast.error('An unknown error occurred') + } + }, + }) + + // Mutation for adding by PEM + const addByPemMutation = useMutation({ + ...postOrganizationsByIdCrlRevokePemMutation(), + onSuccess: () => { + toast.success('Certificate revoked successfully') + refetch() + pemForm.reset() + setPemDialogOpen(false) + }, + onError: (error: any) => { + if (error instanceof Error) { + toast.error(`Failed to revoke certificate: ${error.message}`) + } else if (error.error?.message) { + toast.error(`Failed to revoke certificate: ${error.error.message}`) + } else { + toast.error('An unknown error occurred') + } + }, + }) + + // Mutation for removing from CRL + const unrevokeMutation = useMutation({ + ...deleteOrganizationsByIdCrlRevokeSerialMutation(), + onSuccess: () => { + toast.success('Certificate unrevoked successfully') + refetch() + setCertificateToDelete(null) + }, + onError: (error: any) => { + if (error instanceof Error) { + toast.error(`Failed to unrevoke certificate: ${error.message}`) + } else if (error.error?.message) { + 
toast.error(`Failed to unrevoke certificate: ${error.error.message}`) + } else { + toast.error('An unknown error occurred') + } + }, + }) + + // Mutation for applying CRL to channel + const applyCRLMutation = useMutation({ + ...postNetworksFabricByIdOrganizationCrlMutation(), + onSuccess: () => { + toast.success('CRL applied to channel successfully') + }, + onError: (error: any) => { + if (error instanceof Error) { + toast.error(`Failed to apply CRL to channel: ${error.message}`) + } else if (error.error?.message) { + toast.error(`Failed to apply CRL to channel: ${error.error.message}`) + } else { + toast.error('An unknown error occurred') + } + }, + }) + + const handleApplyCRL = () => { + if (!selectedOrg || !network.id) return + + const selectedOrgData = organizations.find((org) => org.id === selectedOrg) + if (!selectedOrgData) return + + applyCRLMutation.mutate({ + path: { id: network.id }, + body: { + organizationId: selectedOrgData.id, + }, + }) + } + + const [serialDialogOpen, setSerialDialogOpen] = useState(false) + const [pemDialogOpen, setPemDialogOpen] = useState(false) + const [certificateToDelete, setCertificateToDelete] = useState(null) + + if (!organizations || organizations.length === 0) { + return ( + +
+
+ +
+
+

Certificate Revocation List

+

No organizations found

+
+
+ + + You need at least one organization to manage certificate revocations. + +
+ ) + } + + return ( + +
+
+ +
+
+

Certificate Revocation List

+

Manage revoked certificates for your organizations

+
+
+ +
+ + + {selectedOrg && ( + <> +
+ + + + + + + Revoke Certificate by Serial Number + Enter the serial number of the certificate to revoke + +
+ + addBySerialMutation.mutate({ + path: { id: selectedOrg }, + body: { serialNumber: data.serialNumber }, + }) + )} + > + ( + + Serial Number + + + + + + )} + /> + + + + + +
+
+ + + + + + + + Revoke Certificate by PEM + Paste the PEM certificate to revoke + +
+ + addByPemMutation.mutate({ + path: { id: selectedOrg }, + body: { certificate: data.pem }, + }) + )} + > + ( + + PEM Certificate + +