From 008edf31363e4eaa785409b132ab2c0c268741d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Mon, 9 Jun 2025 11:31:19 +0200 Subject: [PATCH 1/9] firewalldb: ensure that test SQL store is closed We add a helper function to the functions that creates the test SQL stores, in order to ensure that the store is properly closed when the test is cleaned up. --- firewalldb/actions_test.go | 9 --------- firewalldb/test_postgres.go | 4 ++-- firewalldb/test_sql.go | 16 ++++++++++++++-- firewalldb/test_sqlite.go | 6 +++--- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/firewalldb/actions_test.go b/firewalldb/actions_test.go index c27e53e96..69990c1da 100644 --- a/firewalldb/actions_test.go +++ b/firewalldb/actions_test.go @@ -28,9 +28,6 @@ func TestActionStorage(t *testing.T) { sessDB := session.NewTestDBWithAccounts(t, clock, accountsDB) db := NewTestDBWithSessionsAndAccounts(t, sessDB, accountsDB, clock) - t.Cleanup(func() { - _ = db.Close() - }) // Assert that attempting to add an action for a session that does not // exist returns an error. @@ -198,9 +195,6 @@ func TestListActions(t *testing.T) { sessDB := session.NewTestDB(t, clock) db := NewTestDBWithSessions(t, sessDB, clock) - t.Cleanup(func() { - _ = db.Close() - }) // Add 2 sessions that we can reference. sess1, err := sessDB.NewSession( @@ -466,9 +460,6 @@ func TestListGroupActions(t *testing.T) { } db := NewTestDBWithSessions(t, sessDB, clock) - t.Cleanup(func() { - _ = db.Close() - }) // There should not be any actions in group 1 yet. al, _, _, err := db.ListActions(ctx, nil, WithActionGroupID(group1)) diff --git a/firewalldb/test_postgres.go b/firewalldb/test_postgres.go index f5777e4cb..324aea2c4 100644 --- a/firewalldb/test_postgres.go +++ b/firewalldb/test_postgres.go @@ -11,11 +11,11 @@ import ( // NewTestDB is a helper function that creates an BBolt database for testing. 
func NewTestDB(t *testing.T, clock clock.Clock) *SQLDB { - return NewSQLDB(db.NewTestPostgresDB(t).BaseDB, clock) + return createStore(t, db.NewTestPostgresDB(t).BaseDB, clock) } // NewTestDBFromPath is a helper function that creates a new BoltStore with a // connection to an existing BBolt database for testing. func NewTestDBFromPath(t *testing.T, _ string, clock clock.Clock) *SQLDB { - return NewSQLDB(db.NewTestPostgresDB(t).BaseDB, clock) + return createStore(t, db.NewTestPostgresDB(t).BaseDB, clock) } diff --git a/firewalldb/test_sql.go b/firewalldb/test_sql.go index 03dcfbebf..2f6c6e62e 100644 --- a/firewalldb/test_sql.go +++ b/firewalldb/test_sql.go @@ -7,6 +7,7 @@ import ( "time" "github.com/lightninglabs/lightning-terminal/accounts" + "github.com/lightninglabs/lightning-terminal/db" "github.com/lightninglabs/lightning-terminal/session" "github.com/lightningnetwork/lnd/clock" "github.com/stretchr/testify/require" @@ -20,7 +21,7 @@ func NewTestDBWithSessions(t *testing.T, sessionStore session.Store, sessions, ok := sessionStore.(*session.SQLStore) require.True(t, ok) - return NewSQLDB(sessions.BaseDB, clock) + return createStore(t, sessions.BaseDB, clock) } // NewTestDBWithSessionsAndAccounts creates a new test SQLDB Store with access // to an existing sessions DB and accounts DB. func NewTestDBWithSessionsAndAccounts(t *testing.T, sessionStore SessionDB, acctStore AccountsDB, clock clock.Clock) *SQLDB { sessions, ok := sessionStore.(*session.SQLStore) require.True(t, ok) require.Equal(t, accounts.BaseDB, sessions.BaseDB) - return NewSQLDB(sessions.BaseDB, clock) + return createStore(t, sessions.BaseDB, clock) } func assertEqualActions(t *testing.T, expected, got *Action) { @@ -52,3 +53,14 @@ func assertEqualActions(t *testing.T, expected, got *Action) { expected.AttemptedAt = expectedAttemptedAt got.AttemptedAt = actualAttemptedAt } + +// createStore is a helper function that creates a new SQLDB and ensures that +// it is closed during the test cleanup. 
+func createStore(t *testing.T, sqlDB *db.BaseDB, clock clock.Clock) *SQLDB { + store := NewSQLDB(sqlDB, clock) + t.Cleanup(func() { + require.NoError(t, store.Close()) + }) + + return store +} diff --git a/firewalldb/test_sqlite.go b/firewalldb/test_sqlite.go index 5496cb205..506b49bcd 100644 --- a/firewalldb/test_sqlite.go +++ b/firewalldb/test_sqlite.go @@ -11,13 +11,13 @@ import ( // NewTestDB is a helper function that creates an BBolt database for testing. func NewTestDB(t *testing.T, clock clock.Clock) *SQLDB { - return NewSQLDB(db.NewTestSqliteDB(t).BaseDB, clock) + return createStore(t, db.NewTestSqliteDB(t).BaseDB, clock) } // NewTestDBFromPath is a helper function that creates a new BoltStore with a // connection to an existing BBolt database for testing. func NewTestDBFromPath(t *testing.T, dbPath string, clock clock.Clock) *SQLDB { - return NewSQLDB( - db.NewTestSqliteDbHandleFromPath(t, dbPath).BaseDB, clock, + return createStore( + t, db.NewTestSqliteDbHandleFromPath(t, dbPath).BaseDB, clock, ) } From 576471e29d1877cd828db4c5cbbf205d510c5d4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Tue, 27 May 2025 16:52:50 +0200 Subject: [PATCH 2/9] firewalldb: export FirewallDBs interface In the upcoming migration of the firewall database to SQL, the helper functions that creates the test databases of different types, need to return a unified interface in order to not have to control the migration tests file by build tags. 
Therefore, we export the unified interface FirewallDBs, so that it can be returned public test DB creation functions --- firewalldb/db.go | 14 +++----------- firewalldb/interface.go | 8 ++++++++ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/firewalldb/db.go b/firewalldb/db.go index b8d9ed06f..a8349a538 100644 --- a/firewalldb/db.go +++ b/firewalldb/db.go @@ -14,29 +14,21 @@ var ( ErrNoSuchKeyFound = fmt.Errorf("no such key found") ) -// firewallDBs is an interface that groups the RulesDB and PrivacyMapper -// interfaces. -type firewallDBs interface { - RulesDB - PrivacyMapper - ActionDB -} - // DB manages the firewall rules database. type DB struct { started sync.Once stopped sync.Once - firewallDBs + FirewallDBs cancel fn.Option[context.CancelFunc] } // NewDB creates a new firewall database. For now, it only contains the // underlying rules' and privacy mapper databases. -func NewDB(dbs firewallDBs) *DB { +func NewDB(dbs FirewallDBs) *DB { return &DB{ - firewallDBs: dbs, + FirewallDBs: dbs, } } diff --git a/firewalldb/interface.go b/firewalldb/interface.go index 5ee729e91..c2955bdc6 100644 --- a/firewalldb/interface.go +++ b/firewalldb/interface.go @@ -134,3 +134,11 @@ type ActionDB interface { // and feature name. GetActionsReadDB(groupID session.ID, featureName string) ActionsReadDB } + +// FirewallDBs is an interface that groups the RulesDB, PrivacyMapper and +// ActionDB interfaces. +type FirewallDBs interface { + RulesDB + PrivacyMapper + ActionDB +} From d7ca6d3659db13e805e20cb12173a7ddb855a2d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Mon, 19 May 2025 13:58:38 +0200 Subject: [PATCH 3/9] firewalldb: update NewTestDB funcs to return FirewallDBs In the upcoming migration of the firewall database to SQL, the helper functions that creates the test databases of different types, need to return a unified interface in order to not have to control the migration tests file by build tags. 
Therefore, we update the `NewTestDB` functions to return the `FirewallDBs` interface instead of the specific store implementation type. --- firewalldb/test_kvdb.go | 18 +++++++++++------- firewalldb/test_postgres.go | 4 ++-- firewalldb/test_sql.go | 5 ++--- firewalldb/test_sqlite.go | 4 ++-- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/firewalldb/test_kvdb.go b/firewalldb/test_kvdb.go index 6f7a49aa3..c3cd4533a 100644 --- a/firewalldb/test_kvdb.go +++ b/firewalldb/test_kvdb.go @@ -6,34 +6,37 @@ import ( "testing" "github.com/lightninglabs/lightning-terminal/accounts" + "github.com/lightninglabs/lightning-terminal/session" "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/fn" "github.com/stretchr/testify/require" ) // NewTestDB is a helper function that creates an BBolt database for testing. -func NewTestDB(t *testing.T, clock clock.Clock) *BoltDB { +func NewTestDB(t *testing.T, clock clock.Clock) FirewallDBs { return NewTestDBFromPath(t, t.TempDir(), clock) } // NewTestDBFromPath is a helper function that creates a new BoltStore with a // connection to an existing BBolt database for testing. -func NewTestDBFromPath(t *testing.T, dbPath string, clock clock.Clock) *BoltDB { +func NewTestDBFromPath(t *testing.T, dbPath string, + clock clock.Clock) FirewallDBs { + return newDBFromPathWithSessions(t, dbPath, nil, nil, clock) } // NewTestDBWithSessions creates a new test BoltDB Store with access to an // existing sessions DB. -func NewTestDBWithSessions(t *testing.T, sessStore SessionDB, - clock clock.Clock) *BoltDB { +func NewTestDBWithSessions(t *testing.T, sessStore session.Store, + clock clock.Clock) FirewallDBs { return newDBFromPathWithSessions(t, t.TempDir(), sessStore, nil, clock) } // NewTestDBWithSessionsAndAccounts creates a new test BoltDB Store with access // to an existing sessions DB and accounts DB. 
-func NewTestDBWithSessionsAndAccounts(t *testing.T, sessStore SessionDB, - acctStore AccountsDB, clock clock.Clock) *BoltDB { +func NewTestDBWithSessionsAndAccounts(t *testing.T, sessStore session.Store, + acctStore AccountsDB, clock clock.Clock) FirewallDBs { return newDBFromPathWithSessions( t, t.TempDir(), sessStore, acctStore, clock, @@ -41,7 +44,8 @@ func NewTestDBWithSessionsAndAccounts(t *testing.T, sessStore SessionDB, } func newDBFromPathWithSessions(t *testing.T, dbPath string, - sessStore SessionDB, acctStore AccountsDB, clock clock.Clock) *BoltDB { + sessStore session.Store, acctStore AccountsDB, + clock clock.Clock) FirewallDBs { store, err := NewBoltDB(dbPath, DBFilename, sessStore, acctStore, clock) require.NoError(t, err) diff --git a/firewalldb/test_postgres.go b/firewalldb/test_postgres.go index 324aea2c4..732b19b4a 100644 --- a/firewalldb/test_postgres.go +++ b/firewalldb/test_postgres.go @@ -10,12 +10,12 @@ import ( ) // NewTestDB is a helper function that creates an BBolt database for testing. -func NewTestDB(t *testing.T, clock clock.Clock) *SQLDB { +func NewTestDB(t *testing.T, clock clock.Clock) FirewallDBs { return createStore(t, db.NewTestPostgresDB(t).BaseDB, clock) } // NewTestDBFromPath is a helper function that creates a new BoltStore with a // connection to an existing BBolt database for testing. -func NewTestDBFromPath(t *testing.T, _ string, clock clock.Clock) *SQLDB { +func NewTestDBFromPath(t *testing.T, _ string, clock clock.Clock) FirewallDBs { return createStore(t, db.NewTestPostgresDB(t).BaseDB, clock) } diff --git a/firewalldb/test_sql.go b/firewalldb/test_sql.go index 2f6c6e62e..a412441f8 100644 --- a/firewalldb/test_sql.go +++ b/firewalldb/test_sql.go @@ -16,8 +16,7 @@ import ( // NewTestDBWithSessions creates a new test SQLDB Store with access to an // existing sessions DB. 
func NewTestDBWithSessions(t *testing.T, sessionStore session.Store, - clock clock.Clock) *SQLDB { - + clock clock.Clock) FirewallDBs { sessions, ok := sessionStore.(*session.SQLStore) require.True(t, ok) @@ -27,7 +26,7 @@ func NewTestDBWithSessions(t *testing.T, sessionStore session.Store, // NewTestDBWithSessionsAndAccounts creates a new test SQLDB Store with access // to an existing sessions DB and accounts DB. func NewTestDBWithSessionsAndAccounts(t *testing.T, sessionStore SessionDB, - acctStore AccountsDB, clock clock.Clock) *SQLDB { + acctStore AccountsDB, clock clock.Clock) FirewallDBs { sessions, ok := sessionStore.(*session.SQLStore) require.True(t, ok) diff --git a/firewalldb/test_sqlite.go b/firewalldb/test_sqlite.go index 506b49bcd..49b956d7d 100644 --- a/firewalldb/test_sqlite.go +++ b/firewalldb/test_sqlite.go @@ -10,13 +10,13 @@ import ( ) // NewTestDB is a helper function that creates an BBolt database for testing. -func NewTestDB(t *testing.T, clock clock.Clock) *SQLDB { +func NewTestDB(t *testing.T, clock clock.Clock) FirewallDBs { return createStore(t, db.NewTestSqliteDB(t).BaseDB, clock) } // NewTestDBFromPath is a helper function that creates a new BoltStore with a // connection to an existing BBolt database for testing. -func NewTestDBFromPath(t *testing.T, dbPath string, clock clock.Clock) *SQLDB { +func NewTestDBFromPath(t *testing.T, dbPath string, clock clock.Clock) FirewallDBs { return createStore( t, db.NewTestSqliteDbHandleFromPath(t, dbPath).BaseDB, clock, ) From 87f5ad9d5d61b376a224e6010374a0d856a48a25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Tue, 20 May 2025 10:39:39 +0200 Subject: [PATCH 4/9] db: add List All Kv Records query During the upcoming migration of the firewall database to SQL, we need to be able to check all kvstores records in the SQL database, to validate that the migration is successful in tests. 
This commits adds a query to list all kvstores records, which enables that functionality. --- db/sqlc/kvstores.sql.go | 36 ++++++++++++++++++++++++++++++++++++ db/sqlc/querier.go | 1 + db/sqlc/queries/kvstores.sql | 4 ++++ 3 files changed, 41 insertions(+) diff --git a/db/sqlc/kvstores.sql.go b/db/sqlc/kvstores.sql.go index b2e6632f4..c0949d173 100644 --- a/db/sqlc/kvstores.sql.go +++ b/db/sqlc/kvstores.sql.go @@ -257,6 +257,42 @@ func (q *Queries) InsertKVStoreRecord(ctx context.Context, arg InsertKVStoreReco return err } +const listAllKVStoresRecords = `-- name: ListAllKVStoresRecords :many +SELECT id, perm, rule_id, session_id, feature_id, entry_key, value +FROM kvstores +` + +func (q *Queries) ListAllKVStoresRecords(ctx context.Context) ([]Kvstore, error) { + rows, err := q.db.QueryContext(ctx, listAllKVStoresRecords) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Kvstore + for rows.Next() { + var i Kvstore + if err := rows.Scan( + &i.ID, + &i.Perm, + &i.RuleID, + &i.SessionID, + &i.FeatureID, + &i.EntryKey, + &i.Value, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const updateFeatureKVStoreRecord = `-- name: UpdateFeatureKVStoreRecord :exec UPDATE kvstores SET value = $1 diff --git a/db/sqlc/querier.go b/db/sqlc/querier.go index df89d0898..117a1fbc5 100644 --- a/db/sqlc/querier.go +++ b/db/sqlc/querier.go @@ -57,6 +57,7 @@ type Querier interface { ListAccountInvoices(ctx context.Context, accountID int64) ([]AccountInvoice, error) ListAccountPayments(ctx context.Context, accountID int64) ([]AccountPayment, error) ListAllAccounts(ctx context.Context) ([]Account, error) + ListAllKVStoresRecords(ctx context.Context) ([]Kvstore, error) ListSessions(ctx context.Context) ([]Session, error) ListSessionsByState(ctx context.Context, state int16) ([]Session, error) 
ListSessionsByType(ctx context.Context, type_ int16) ([]Session, error) diff --git a/db/sqlc/queries/kvstores.sql b/db/sqlc/queries/kvstores.sql index 7963e46a4..1ebfe3b0d 100644 --- a/db/sqlc/queries/kvstores.sql +++ b/db/sqlc/queries/kvstores.sql @@ -28,6 +28,10 @@ VALUES ($1, $2, $3, $4, $5, $6); DELETE FROM kvstores WHERE perm = false; +-- name: ListAllKVStoresRecords :many +SELECT * +FROM kvstores; + -- name: GetGlobalKVStoreRecord :one SELECT value FROM kvstores From 0fc319a50a4e9bf2ca067918fabcd925b2c6d95b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Tue, 6 May 2025 19:42:08 +0200 Subject: [PATCH 5/9] firewalldb: clarify bbolt kvstores illustration During the migration of the kvstores to SQL, we'll iterate over the buckets in the bbolt database, which holds all kvstores records. In order to understand why the migration iterates over the buckets in the specific order, we need to clarify the bbolt kvstores illustration docs, so that it correctly reflects how the records are actually stored in the bbolt database. --- firewalldb/kvstores_kvdb.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/firewalldb/kvstores_kvdb.go b/firewalldb/kvstores_kvdb.go index 51721d475..d1e8e35a6 100644 --- a/firewalldb/kvstores_kvdb.go +++ b/firewalldb/kvstores_kvdb.go @@ -16,13 +16,13 @@ the temporary store changes instead of just keeping an in-memory store is that we can then guarantee atomicity if changes are made to both the permanent and temporary stores. 
-rules -> perm -> rule-name -> global -> {k:v} - -> sessions -> group ID -> session-kv-store -> {k:v} - -> feature-kv-stores -> feature-name -> {k:v} +"rules" -> "perm" -> rule-name -> "global" -> {k:v} + "session-kv-store" -> group ID -> {k:v} + -> "feature-kv-stores" -> feature-name -> {k:v} - -> temp -> rule-name -> global -> {k:v} - -> sessions -> group ID -> session-kv-store -> {k:v} - -> feature-kv-stores -> feature-name -> {k:v} + -> "temp" -> rule-name -> "global" -> {k:v} + "session-kv-store" -> group ID -> {k:v} + -> "feature-kv-stores" -> feature-name -> {k:v} */ var ( From 17af8cda444e1eb71fbb5908efe1b1a5f2a5b2d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Tue, 6 May 2025 19:44:31 +0200 Subject: [PATCH 6/9] firewalldb: add kvstores kvdb to SQL migration This commit introduces the migration logic for transitioning the kvstores store from kvdb to SQL. Note that as of this commit, the migration is not yet triggered by any production code, i.e. only tests execute the migration logic. --- firewalldb/sql_migration.go | 486 ++++++++++++++++++++++++++++ firewalldb/sql_migration_test.go | 537 +++++++++++++++++++++++++++++++ 2 files changed, 1023 insertions(+) create mode 100644 firewalldb/sql_migration.go create mode 100644 firewalldb/sql_migration_test.go diff --git a/firewalldb/sql_migration.go b/firewalldb/sql_migration.go new file mode 100644 index 000000000..092b61c8e --- /dev/null +++ b/firewalldb/sql_migration.go @@ -0,0 +1,486 @@ +package firewalldb + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "github.com/lightninglabs/lightning-terminal/db/sqlc" + "github.com/lightningnetwork/lnd/sqldb" + "go.etcd.io/bbolt" +) + +// kvParams is a type alias for the InsertKVStoreRecordParams, to shorten the +// line length in the migration code. +type kvParams = sqlc.InsertKVStoreRecordParams + +// MigrateFirewallDBToSQL runs the migration of the firwalldb stores from the +// bbolt database to a SQL database. 
The migration is done in a single +// transaction to ensure that all rows in the stores are migrated or none at +// all. +// +// Note that this migration currently only migrates the kvstores, but will be +// extended in the future to also migrate the privacy mapper and action stores. +// +// NOTE: As sessions may contain linked sessions and accounts, the sessions and +// accounts sql migration MUST be run prior to this migration. +func MigrateFirewallDBToSQL(ctx context.Context, kvStore *bbolt.DB, + tx SQLQueries) error { + + log.Infof("Starting migration of the rules DB to SQL") + + err := migrateKVStoresDBToSQL(ctx, kvStore, tx) + if err != nil { + return err + } + + log.Infof("The rules DB has been migrated from KV to SQL.") + + // TODO(viktor): Add migration for the privacy mapper and the action + // stores. + + return nil +} + +// migrateKVStoresDBToSQL runs the migration of all KV stores from the KV +// database to the SQL database. The function also asserts that the +// migrated values match the original values in the KV store. +// See the illustration in the firwalldb/kvstores_kvdb.go file to understand +// the structure of the KV stores, and why we process the buckets in the +// order we do. +// Note that this function and the subsequent functions are intentionally +// designed to loop over all buckets and values that exist in the KV store, +// so that we are sure that we actually find all stores and values that +// exist in the KV store, and can be sure that the kv store actually follows +// the expected structure. +func migrateKVStoresDBToSQL(ctx context.Context, kvStore *bbolt.DB, + sqlTx SQLQueries) error { + + log.Infof("Starting migration of the KV stores to SQL") + + // allParams will hold all the kvParams that are inserted into the + // SQL database during the migration. 
+ var allParams []kvParams + + err := kvStore.View(func(kvTx *bbolt.Tx) error { + for _, perm := range []bool{true, false} { + mainBucket, err := getMainBucket(kvTx, false, perm) + if err != nil { + return err + } + + if mainBucket == nil { + // If the mainBucket doesn't exist, there are no + // records to migrate under that bucket, + // therefore we don't error, and just proceed + // to not migrate any records under that bucket. + continue + } + + err = mainBucket.ForEach(func(k, v []byte) error { + if v != nil { + return errors.New("expected only " + + "buckets under main bucket") + } + + ruleName := k + ruleNameBucket := mainBucket.Bucket(k) + if ruleNameBucket == nil { + return fmt.Errorf("rule bucket %s "+ + "not found", string(k)) + } + + ruleId, err := sqlTx.GetOrInsertRuleID( + ctx, string(ruleName), + ) + if err != nil { + return err + } + + params, err := processRuleBucket( + ctx, sqlTx, perm, ruleId, + ruleNameBucket, + ) + if err != nil { + return err + } + + allParams = append(allParams, params...) + + return nil + }) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + // After the migration is done, we validate that all inserted kvParams + // can match the original values in the KV store. Note that this is done + // after all values have been inserted, in order to ensure that the + // migration doesn't overwrite any values after they were inserted. 
+ for _, param := range allParams { + switch { + case param.FeatureID.Valid && param.SessionID.Valid: + migratedValue, err := sqlTx.GetFeatureKVStoreRecord( + ctx, + sqlc.GetFeatureKVStoreRecordParams{ + Key: param.EntryKey, + Perm: param.Perm, + RuleID: param.RuleID, + SessionID: param.SessionID, + FeatureID: param.FeatureID, + }, + ) + if err != nil { + return fmt.Errorf("retreiving of migrated "+ + "feature specific kv store record "+ + "failed %w", err) + } + + if !bytes.Equal(migratedValue, param.Value) { + return fmt.Errorf("migrated feature specific "+ + "kv record value %x does not match "+ + "original value %x", migratedValue, + param.Value) + } + + case param.SessionID.Valid: + migratedValue, err := sqlTx.GetSessionKVStoreRecord( + ctx, + sqlc.GetSessionKVStoreRecordParams{ + Key: param.EntryKey, + Perm: param.Perm, + RuleID: param.RuleID, + SessionID: param.SessionID, + }, + ) + if err != nil { + return fmt.Errorf("retreiving of migrated "+ + "session wide kv store record "+ + "failed %w", err) + } + + if !bytes.Equal(migratedValue, param.Value) { + return fmt.Errorf("migrated session wide kv "+ + "record value %x does not match "+ + "original value %x", migratedValue, + param.Value) + } + + case !param.FeatureID.Valid && !param.SessionID.Valid: + migratedValue, err := sqlTx.GetGlobalKVStoreRecord( + ctx, + sqlc.GetGlobalKVStoreRecordParams{ + Key: param.EntryKey, + Perm: param.Perm, + RuleID: param.RuleID, + }, + ) + if err != nil { + return fmt.Errorf("retreiving of migrated "+ + "global kv store record failed %w", err) + } + + if !bytes.Equal(migratedValue, param.Value) { + return fmt.Errorf("migrated global kv record "+ + "value %x does not match original "+ + "value %x", migratedValue, param.Value) + } + + default: + return fmt.Errorf("unexpected combination of "+ + "FeatureID and SessionID for: %v", param) + } + } + + log.Infof("Migration of the KV stores to SQL completed. 
Total number "+ + "of rows migrated: %d", len(allParams)) + + return nil +} + +// processRuleBucket processes a single rule bucket, which contains the +// global and session-kv-store key buckets. +func processRuleBucket(ctx context.Context, sqlTx SQLQueries, perm bool, + ruleSqlId int64, ruleBucket *bbolt.Bucket) ([]kvParams, error) { + + var params []kvParams + + return params, ruleBucket.ForEach(func(k, v []byte) error { + switch { + case v != nil: + return errors.New("expected only buckets under " + + "rule-name bucket") + case bytes.Equal(k, globalKVStoreBucketKey): + globalBucket := ruleBucket.Bucket( + globalKVStoreBucketKey, + ) + if globalBucket == nil { + return fmt.Errorf("global bucket %s for rule "+ + "id %d not found", string(k), ruleSqlId) + } + + p, err := processGlobalRuleBucket( + ctx, sqlTx, perm, ruleSqlId, globalBucket, + ) + if err != nil { + return err + } + + params = append(params, p...) + + return nil + case bytes.Equal(k, sessKVStoreBucketKey): + sessionBucket := ruleBucket.Bucket( + sessKVStoreBucketKey, + ) + if sessionBucket == nil { + return fmt.Errorf("session bucket %s for rule "+ + "id %d not found", string(k), ruleSqlId) + } + + p, err := processSessionBucket( + ctx, sqlTx, perm, ruleSqlId, sessionBucket, + ) + if err != nil { + return err + } + + params = append(params, p...) + + return nil + default: + return fmt.Errorf("unexpected bucket %s under "+ + "rule-name bucket", string(k)) + } + }) +} + +// processGlobalRuleBucket processes the global bucket under a rule bucket, +// which contains the global key-value store records for the rule. +// It inserts the records into the SQL database and asserts that +// the migrated values match the original values in the KV store. 
+func processGlobalRuleBucket(ctx context.Context, sqlTx SQLQueries, perm bool, + ruleSqlId int64, globalBucket *bbolt.Bucket) ([]kvParams, error) { + + var params []kvParams + + return params, globalBucket.ForEach(func(k, v []byte) error { + if v == nil { + return errors.New("expected only key-values under " + + "global rule-name bucket") + } + + globalInsertParams := kvParams{ + EntryKey: string(k), + Value: v, + Perm: perm, + RuleID: ruleSqlId, + } + + err := sqlTx.InsertKVStoreRecord(ctx, globalInsertParams) + if err != nil { + return fmt.Errorf("inserting global kv store "+ + "record failed %w", err) + } + + params = append(params, globalInsertParams) + + return nil + }) +} + +// processSessionBucket processes the session-kv-store bucket under a rule +// bucket, which contains the group-id buckets for that rule. +func processSessionBucket(ctx context.Context, sqlTx SQLQueries, perm bool, + ruleSqlId int64, mainSessionBucket *bbolt.Bucket) ([]kvParams, error) { + + var params []kvParams + + return params, mainSessionBucket.ForEach(func(groupId, v []byte) error { + if v != nil { + return fmt.Errorf("expected only buckets under "+ + "%s bucket", string(sessKVStoreBucketKey)) + } + + groupBucket := mainSessionBucket.Bucket(groupId) + if groupBucket == nil { + return fmt.Errorf("group bucket for group id %s "+ + "not found", string(groupId)) + } + + p, err := processGroupBucket( + ctx, sqlTx, perm, ruleSqlId, groupId, groupBucket, + ) + if err != nil { + return err + } + + params = append(params, p...) + + return nil + }) +} + +// processGroupBucket processes a single group bucket, which contains the +// session-wide kv records as well as the feature-kv-stores key bucket for +// that group. For the session-wide kv records, it inserts the records into the +// SQL database and asserts that the migrated values match the original values. 
+func processGroupBucket(ctx context.Context, sqlTx SQLQueries, perm bool, + ruleSqlId int64, groupAlias []byte, + groupBucket *bbolt.Bucket) ([]kvParams, error) { + + groupSqlId, err := sqlTx.GetSessionIDByAlias( + ctx, groupAlias, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("session with group id %x "+ + "not found in sql db", groupAlias) + } else if err != nil { + return nil, err + } + + var params []kvParams + + return params, groupBucket.ForEach(func(k, v []byte) error { + switch { + case v != nil: + // This is a non-feature specific k:v store for the + // session, i.e. the session-wide store. + sessWideParams := kvParams{ + EntryKey: string(k), + Value: v, + Perm: perm, + RuleID: ruleSqlId, + SessionID: sqldb.SQLInt64(groupSqlId), + } + + err := sqlTx.InsertKVStoreRecord(ctx, sessWideParams) + if err != nil { + return fmt.Errorf("inserting session wide kv "+ + "store record failed %w", err) + } + + params = append(params, sessWideParams) + + return nil + case bytes.Equal(k, featureKVStoreBucketKey): + // This is a feature specific k:v store for the + // session, which will be stored under the feature-name + // under this bucket. + + featureStoreBucket := groupBucket.Bucket( + featureKVStoreBucketKey, + ) + if featureStoreBucket == nil { + return fmt.Errorf("feature store bucket %s "+ + "for group id %s not found", + string(featureKVStoreBucketKey), + string(groupAlias)) + } + + p, err := processFeatureStoreBucket( + ctx, sqlTx, perm, ruleSqlId, groupSqlId, + featureStoreBucket, + ) + if err != nil { + return err + } + + params = append(params, p...) + + return nil + default: + return fmt.Errorf("unexpected bucket %s found under "+ + "the %s bucket", string(k), + string(sessKVStoreBucketKey)) + } + }) +} + +// processFeatureStoreBucket processes the feature-kv-store bucket under a +// group bucket, which contains the feature specific buckets for that group. 
+func processFeatureStoreBucket(ctx context.Context, sqlTx SQLQueries, perm bool, + ruleSqlId int64, groupSqlId int64, + featureStoreBucket *bbolt.Bucket) ([]kvParams, error) { + + var params []kvParams + + return params, featureStoreBucket.ForEach(func(k, v []byte) error { + if v != nil { + return fmt.Errorf("expected only buckets under " + + "feature stores bucket") + } + + featureName := k + featureNameBucket := featureStoreBucket.Bucket(featureName) + if featureNameBucket == nil { + return fmt.Errorf("feature bucket %s not found", + string(featureName)) + } + + featureSqlId, err := sqlTx.GetOrInsertFeatureID( + ctx, string(featureName), + ) + if err != nil { + return err + } + + p, err := processFeatureNameBucket( + ctx, sqlTx, perm, ruleSqlId, groupSqlId, featureSqlId, + featureNameBucket, + ) + if err != nil { + return err + } + + params = append(params, p...) + + return nil + }) +} + +// processFeatureNameBucket processes a single feature name bucket, which +// contains the feature specific key-value store records for that group. +// It inserts the records into the SQL database and asserts that +// the migrated values match the original values in the KV store. 
+func processFeatureNameBucket(ctx context.Context, sqlTx SQLQueries, perm bool, + ruleSqlId int64, groupSqlId int64, featureSqlId int64, + featureNameBucket *bbolt.Bucket) ([]kvParams, error) { + + var params []kvParams + + return params, featureNameBucket.ForEach(func(k, v []byte) error { + if v == nil { + return fmt.Errorf("expected only key-values under "+ + "feature name bucket, but found bucket %s", + string(k)) + } + + featureParams := kvParams{ + EntryKey: string(k), + Value: v, + Perm: perm, + RuleID: ruleSqlId, + SessionID: sqldb.SQLInt64(groupSqlId), + FeatureID: sqldb.SQLInt64(featureSqlId), + } + + err := sqlTx.InsertKVStoreRecord(ctx, featureParams) + if err != nil { + return fmt.Errorf("inserting feature specific kv "+ + "store record failed %w", err) + } + + params = append(params, featureParams) + + return nil + }) +} diff --git a/firewalldb/sql_migration_test.go b/firewalldb/sql_migration_test.go new file mode 100644 index 000000000..c068671cc --- /dev/null +++ b/firewalldb/sql_migration_test.go @@ -0,0 +1,537 @@ +package firewalldb + +import ( + "context" + "database/sql" + "fmt" + "github.com/lightningnetwork/lnd/fn" + "testing" + "time" + + "github.com/lightninglabs/lightning-terminal/accounts" + "github.com/lightninglabs/lightning-terminal/db" + "github.com/lightninglabs/lightning-terminal/db/sqlc" + "github.com/lightninglabs/lightning-terminal/session" + "github.com/lightningnetwork/lnd/clock" + "github.com/lightningnetwork/lnd/sqldb" + "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" +) + +// kvStoreRecord represents a single KV entry inserted into the BoltDB. +type kvStoreRecord struct { + Perm bool + RuleName string + EntryKey string + Global bool + GroupID *session.ID + FeatureName fn.Option[string] // Set if the record is feature specific + Value []byte +} + +// TestFirewallDBMigration tests the migration of firewalldb from a bolt +// backed to a SQL database. 
Note that this test does not attempt to be a
+// complete migration test.
+// This test only tests the migration of the KV stores currently, but will
+// be extended in the future to also test the migration of the privacy mapper
+// and the actions store.
+func TestFirewallDBMigration(t *testing.T) {
+	t.Parallel()
+
+	ctx := context.Background()
+	clock := clock.NewTestClock(time.Now())
+
+	// When using build tags that creates a kvdb store for NewTestDB, we
+	// skip this test as it is only applicable for postgres and sqlite tags.
+	store := NewTestDB(t, clock)
+	if _, ok := store.(*BoltDB); ok {
+		t.Skipf("Skipping Firewall DB migration test for kvdb build")
+	}
+
+	makeSQLDB := func(t *testing.T, sessionsStore session.Store) (*SQLDB,
+		*db.TransactionExecutor[SQLQueries]) {
+
+		testDBStore := NewTestDBWithSessions(t, sessionsStore, clock)
+
+		store, ok := testDBStore.(*SQLDB)
+		require.True(t, ok)
+
+		baseDB := store.BaseDB
+
+		genericExecutor := db.NewTransactionExecutor(
+			baseDB, func(tx *sql.Tx) SQLQueries {
+				return baseDB.WithTx(tx)
+			},
+		)
+
+		return store, genericExecutor
+	}
+
+	// The assertMigrationResults function will currently assert that
+	// the migrated kv stores records in the SQLDB match the original kv
+	// stores records in the BoltDB.
+ assertMigrationResults := func(t *testing.T, sqlStore *SQLDB, + kvRecords []kvStoreRecord) { + + var ( + ruleIDs = make(map[string]int64) + groupIDs = make(map[string]int64) + featureIDs = make(map[string]int64) + err error + ) + + getRuleID := func(ruleName string) int64 { + ruleID, ok := ruleIDs[ruleName] + if !ok { + ruleID, err = sqlStore.GetRuleID( + ctx, ruleName, + ) + require.NoError(t, err) + + ruleIDs[ruleName] = ruleID + } + + return ruleID + } + + getGroupID := func(groupAlias []byte) int64 { + groupID, ok := groupIDs[string(groupAlias)] + if !ok { + groupID, err = sqlStore.GetSessionIDByAlias( + ctx, groupAlias, + ) + require.NoError(t, err) + + groupIDs[string(groupAlias)] = groupID + } + + return groupID + } + + getFeatureID := func(featureName string) int64 { + featureID, ok := featureIDs[featureName] + if !ok { + featureID, err = sqlStore.GetFeatureID( + ctx, featureName, + ) + require.NoError(t, err) + + featureIDs[featureName] = featureID + } + + return featureID + } + + // First we extract all migrated kv records from the SQLDB, + // in order to be able to compare them to the original kv + // records, to ensure that the migration was successful. 
+ sqlKvRecords, err := sqlStore.ListAllKVStoresRecords(ctx) + require.NoError(t, err) + require.Equal(t, len(kvRecords), len(sqlKvRecords)) + + for _, kvRecord := range kvRecords { + ruleID := getRuleID(kvRecord.RuleName) + + if kvRecord.Global { + sqlVal, err := sqlStore.GetGlobalKVStoreRecord( + ctx, + sqlc.GetGlobalKVStoreRecordParams{ + Key: kvRecord.EntryKey, + Perm: kvRecord.Perm, + RuleID: ruleID, + }, + ) + require.NoError(t, err) + require.Equal(t, kvRecord.Value, sqlVal) + } else if kvRecord.FeatureName.IsNone() { + groupID := getGroupID(kvRecord.GroupID[:]) + + sqlVal, err := sqlStore.GetSessionKVStoreRecord( + ctx, + sqlc.GetSessionKVStoreRecordParams{ + Key: kvRecord.EntryKey, + Perm: kvRecord.Perm, + RuleID: ruleID, + SessionID: sql.NullInt64{ + Int64: groupID, + Valid: true, + }, + }, + ) + require.NoError(t, err) + require.Equal(t, kvRecord.Value, sqlVal) + } else { + groupID := getGroupID(kvRecord.GroupID[:]) + featureID := getFeatureID( + kvRecord.FeatureName.UnwrapOrFail(t), + ) + + sqlVal, err := sqlStore.GetFeatureKVStoreRecord( + ctx, + sqlc.GetFeatureKVStoreRecordParams{ + Key: kvRecord.EntryKey, + Perm: kvRecord.Perm, + RuleID: ruleID, + SessionID: sql.NullInt64{ + Int64: groupID, + Valid: true, + }, + FeatureID: sql.NullInt64{ + Int64: featureID, + Valid: true, + }, + }, + ) + require.NoError(t, err) + require.Equal(t, kvRecord.Value, sqlVal) + } + } + } + + // The tests slice contains all the tests that we will run for the + // migration of the firewalldb from a BoltDB to a SQLDB. + // Note that the tests currently only test the migration of the KV + // stores, but will be extended in the future to also test the migration + // of the privacy mapper and the actions store. 
+ tests := []struct { + name string + populateDB func(t *testing.T, ctx context.Context, + boltDB *BoltDB, + sessionStore session.Store) []kvStoreRecord + }{ + { + name: "empty", + populateDB: func(t *testing.T, ctx context.Context, + boltDB *BoltDB, + sessionStore session.Store) []kvStoreRecord { + + // Don't populate the DB. + return make([]kvStoreRecord, 0) + }, + }, + { + name: "global records", + populateDB: globalRecords, + }, + { + name: "session specific records", + populateDB: sessionSpecificRecords, + }, + { + name: "feature specific records", + populateDB: featureSpecificRecords, + }, + { + name: "records at all levels", + populateDB: recordsAtAllLevels, + }, + { + name: "random records", + populateDB: randomKVRecords, + }, + } + + for _, test := range tests { + tc := test + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // First let's create a sessions store to link to in + // the kvstores DB. In order to create the sessions + // store though, we also need to create an accounts + // store, that we link to the sessions store. + // Note that both of these stores will be sql stores due + // to the build tags enabled when running this test, + // which means we can also pass the sessions store to + // the sql version of the kv stores that we'll create + // in test, without also needing to migrate it. + accountStore := accounts.NewTestDB(t, clock) + sessionsStore := session.NewTestDBWithAccounts( + t, clock, accountStore, + ) + + // Create a new firewall store to populate with test + // data. + firewallStore, err := NewBoltDB( + t.TempDir(), DBFilename, sessionsStore, + accountStore, clock, + ) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, firewallStore.Close()) + }) + + // Populate the kv store. + records := test.populateDB( + t, ctx, firewallStore, sessionsStore, + ) + + // Create the SQL store that we will migrate the data + // to. + sqlStore, txEx := makeSQLDB(t, sessionsStore) + + // Perform the migration. 
+ var opts sqldb.MigrationTxOptions + err = txEx.ExecTx(ctx, &opts, + func(tx SQLQueries) error { + return MigrateFirewallDBToSQL( + ctx, firewallStore.DB, tx, + ) + }, + ) + require.NoError(t, err) + + // Assert migration results. + assertMigrationResults(t, sqlStore, records) + }) + } +} + +// globalRecords populates the kv store with one global record for the temp +// store, and one for the perm store. +func globalRecords(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + + return insertTestKVRecords( + t, ctx, boltDB, sessionStore, true, fn.None[string](), + ) +} + +// sessionSpecificRecords populates the kv store with one session specific +// record for the local temp store, and one session specific record for the perm +// local store. +func sessionSpecificRecords(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + + return insertTestKVRecords( + t, ctx, boltDB, sessionStore, false, fn.None[string](), + ) +} + +// featureSpecificRecords populates the kv store with one feature specific +// record for the local temp store, and one feature specific record for the perm +// local store. +func featureSpecificRecords(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + + return insertTestKVRecords( + t, ctx, boltDB, sessionStore, false, fn.Some("test-feature"), + ) +} + +// recordsAtAllLevels uses adds a record at all possible levels of the kvstores, +// by utilizing all the other helper functions that populates the kvstores at +// different levels. +func recordsAtAllLevels(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + + gRecords := globalRecords(t, ctx, boltDB, sessionStore) + sRecords := sessionSpecificRecords(t, ctx, boltDB, sessionStore) + fRecords := featureSpecificRecords(t, ctx, boltDB, sessionStore) + + return append(gRecords, append(sRecords, fRecords...)...) 
+} + +// insertTestKVRecords populates the kv store with one record for the local temp +// store, and one record for the local store. The records will be feature +// specific if the featureNameOpt is set, otherwise they will be session +// specific. Both of the records will be inserted with the same +// session.GroupID, which is created in this function, as well as the same +// ruleName, entryKey and entryVal. +func insertTestKVRecords(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessionStore session.Store, global bool, + featureNameOpt fn.Option[string]) []kvStoreRecord { + + var ( + ruleName = "test-rule" + entryKey = "test1" + entryVal = []byte{1, 2, 3} + ) + + // Create a session that we can reference. + sess, err := sessionStore.NewSession( + ctx, "test", session.TypeAutopilot, + time.Unix(1000, 0), "something", + ) + require.NoError(t, err) + + tempKvRecord := kvStoreRecord{ + RuleName: ruleName, + GroupID: &sess.GroupID, + FeatureName: featureNameOpt, + EntryKey: entryKey, + Value: entryVal, + Perm: false, + Global: global, + } + + insertKvRecord(t, ctx, boltDB, tempKvRecord) + + permKvRecord := kvStoreRecord{ + RuleName: ruleName, + GroupID: &sess.GroupID, + FeatureName: featureNameOpt, + EntryKey: entryKey, + Value: entryVal, + Perm: true, + Global: global, + } + + insertKvRecord(t, ctx, boltDB, permKvRecord) + + return []kvStoreRecord{tempKvRecord, permKvRecord} +} + +// insertTestKVRecords populates the kv store with passed record, and asserts +// that the record is inserted correctly. 
+func insertKvRecord(t *testing.T, ctx context.Context, + boltDB *BoltDB, record kvStoreRecord) { + + if record.Global && record.FeatureName.IsSome() { + t.Fatalf("cannot set both global and feature specific at the " + + "same time") + } + + kvStores := boltDB.GetKVStores( + record.RuleName, *record.GroupID, + record.FeatureName.UnwrapOr(""), + ) + + err := kvStores.Update(ctx, func(ctx context.Context, + tx KVStoreTx) error { + + switch { + case record.Global && !record.Perm: + return tx.GlobalTemp().Set( + ctx, record.EntryKey, record.Value, + ) + case record.Global && record.Perm: + return tx.Global().Set( + ctx, record.EntryKey, record.Value, + ) + case !record.Global && !record.Perm: + return tx.LocalTemp().Set( + ctx, record.EntryKey, record.Value, + ) + case !record.Global && record.Perm: + return tx.Local().Set( + ctx, record.EntryKey, record.Value, + ) + default: + return fmt.Errorf("unexpected global/perm "+ + "combination: global=%v, perm=%v", + record.Global, record.Perm) + } + }) + require.NoError(t, err) +} + +// randomKVRecords populates the kv store with random kv records that span +// across all possible combinations of different levels of records in the kv +// store. All values and different bucket names are randomly generated. +func randomKVRecords(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + + var ( + // We set the number of records to insert to 1000, as that + // should be enough to cover as many different + // combinations of records as possible, while still being + // fast enough to run in a reasonable time. + numberOfRecords = 1000 + insertedRecords = make([]kvStoreRecord, 0) + ruleName = "initial-rule" + groupId *session.ID + featureName = "initial-feature" + ) + + // Create a random session that we can reference for the initial group + // ID. 
+ sess, err := sessionStore.NewSession( + ctx, "initial-session", session.Type(uint8(rand.Intn(5))), + time.Unix(1000, 0), randomString(rand.Intn(10)+1), + ) + require.NoError(t, err) + + groupId = &sess.GroupID + + // Generate random records. Note that many records will use the same + // rule name, group ID and feature name, to simulate the real world + // usage of the kv stores as much as possible. + for i := 0; i < numberOfRecords; i++ { + // On average, we will generate a new rule which will be used + // for the kv store record 10% of the time. + if rand.Intn(10) == 0 { + ruleName = fmt.Sprintf( + "rule-%s-%d", randomString(rand.Intn(30)+1), i, + ) + } + + // On average, we use the global store 25% of the time. + global := rand.Intn(4) == 0 + + // We'll use the perm store 50% of the time. + perm := rand.Intn(2) == 0 + + // For the non-global records, we will generate a new group ID + // 25% of the time. + if !global && rand.Intn(4) == 0 { + newSess, err := sessionStore.NewSession( + ctx, fmt.Sprintf("session-%d", i), + session.Type(uint8(rand.Intn(5))), + time.Unix(1000, 0), + randomString(rand.Intn(10)+1), + ) + require.NoError(t, err) + + groupId = &newSess.GroupID + } + + featureNameOpt := fn.None[string]() + + // For 50% of the non-global records, we insert a feature + // specific record. The other 50% will be session specific + // records. + if !global && rand.Intn(2) == 0 { + // 25% of the time, we will generate a new feature name. + if rand.Intn(4) == 0 { + featureName = fmt.Sprintf( + "feature-%s-%d", + randomString(rand.Intn(30)+1), i, + ) + } + + featureNameOpt = fn.Some(featureName) + } + + kvEntry := kvStoreRecord{ + RuleName: ruleName, + GroupID: groupId, + FeatureName: featureNameOpt, + EntryKey: fmt.Sprintf("key-%d", i), + Perm: perm, + Global: global, + // We'll generate a random value for all records, + Value: []byte(randomString(rand.Intn(100) + 1)), + } + + // Insert the record into the kv store. 
+ insertKvRecord(t, ctx, boltDB, kvEntry) + + // Add the record to the list of inserted records. + insertedRecords = append(insertedRecords, kvEntry) + } + + return insertedRecords +} + +// randomString generates a random string of the passed length n. +func randomString(n int) string { + letterBytes := "abcdefghijklmnopqrstuvwxyz" + + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + return string(b) +} From 53900df9db4c83c8b1ac7a8938575bfdef50de8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Sat, 14 Jun 2025 01:31:54 +0200 Subject: [PATCH 7/9] firewalldb: rename kv stores migration tests --- firewalldb/sql_migration_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/firewalldb/sql_migration_test.go b/firewalldb/sql_migration_test.go index c068671cc..7467238b2 100644 --- a/firewalldb/sql_migration_test.go +++ b/firewalldb/sql_migration_test.go @@ -210,23 +210,23 @@ func TestFirewallDBMigration(t *testing.T) { }, }, { - name: "global records", + name: "global kv records", populateDB: globalRecords, }, { - name: "session specific records", + name: "session specific kv records", populateDB: sessionSpecificRecords, }, { - name: "feature specific records", + name: "feature specific kv records", populateDB: featureSpecificRecords, }, { - name: "records at all levels", + name: "records at all kv levels", populateDB: recordsAtAllLevels, }, { - name: "random records", + name: "random kv records", populateDB: randomKVRecords, }, } From 73171529030ce3dc4a382aa081c03f11861ab440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Tue, 17 Jun 2025 17:34:31 +0200 Subject: [PATCH 8/9] firewalldb: prepare migration tests for more stores Currently, the migration tests for firewalldb only migrates the kv stores. In future commits, we will also migrate the privacy mapper and the actions in the firewalldb,. 
Before this commit, the expected results of the migrations tests could only be kv records, which will not be the case when we also migrate the privacy mapper and the actions. Therefore, we prepare the migration tests to expect more than just kv records. This commit introduces a new type of `expectedResult` type which the prep of the migration tests will use, which can specify more than just one type of expected result. --- firewalldb/sql_migration_test.go | 79 +++++++++++++++++++++++--------- 1 file changed, 58 insertions(+), 21 deletions(-) diff --git a/firewalldb/sql_migration_test.go b/firewalldb/sql_migration_test.go index 7467238b2..44c76b9a0 100644 --- a/firewalldb/sql_migration_test.go +++ b/firewalldb/sql_migration_test.go @@ -4,7 +4,6 @@ import ( "context" "database/sql" "fmt" - "github.com/lightningnetwork/lnd/fn" "testing" "time" @@ -13,6 +12,7 @@ import ( "github.com/lightninglabs/lightning-terminal/db/sqlc" "github.com/lightninglabs/lightning-terminal/session" "github.com/lightningnetwork/lnd/clock" + "github.com/lightningnetwork/lnd/fn" "github.com/lightningnetwork/lnd/sqldb" "github.com/stretchr/testify/require" "golang.org/x/exp/rand" @@ -29,6 +29,10 @@ type kvStoreRecord struct { Value []byte } +type expectedResult struct { + kvRecords fn.Option[[]kvStoreRecord] +} + // TestFirewallDBMigration tests the migration of firewalldb from a bolt // backed to a SQL database. Note that this test does not attempt to be a // complete migration test. @@ -67,10 +71,7 @@ func TestFirewallDBMigration(t *testing.T) { return store, genericExecutor } - // The assertMigrationResults function will currently assert that - // the migrated kv stores records in the SQLDB match the original kv - // stores records in the BoltDB. 
- assertMigrationResults := func(t *testing.T, sqlStore *SQLDB, + assertKvStoreMigrationResults := func(t *testing.T, sqlStore *SQLDB, kvRecords []kvStoreRecord) { var ( @@ -188,6 +189,20 @@ func TestFirewallDBMigration(t *testing.T) { } } + // The assertMigrationResults function will currently assert that + // the migrated kv stores records in the SQLDB match the original kv + // stores records in the BoltDB. + assertMigrationResults := func(t *testing.T, sqlStore *SQLDB, + expRes *expectedResult) { + + // If the expected result contains kv records, then we + // assert that the kv store migration results match + // the expected results. + expRes.kvRecords.WhenSome(func(kvRecords []kvStoreRecord) { + assertKvStoreMigrationResults(t, sqlStore, kvRecords) + }) + } + // The tests slice contains all the tests that we will run for the // migration of the firewalldb from a BoltDB to a SQLDB. // Note that the tests currently only test the migration of the KV @@ -197,16 +212,22 @@ func TestFirewallDBMigration(t *testing.T) { name string populateDB func(t *testing.T, ctx context.Context, boltDB *BoltDB, - sessionStore session.Store) []kvStoreRecord + sessionStore session.Store) *expectedResult }{ { name: "empty", populateDB: func(t *testing.T, ctx context.Context, boltDB *BoltDB, - sessionStore session.Store) []kvStoreRecord { + sessionStore session.Store) *expectedResult { + + // Don't populate the DB, and return empty kv + // records and privacy pairs. - // Don't populate the DB. - return make([]kvStoreRecord, 0) + return &expectedResult{ + kvRecords: fn.Some( + []kvStoreRecord{}, + ), + } }, }, { @@ -291,7 +312,7 @@ func TestFirewallDBMigration(t *testing.T) { // globalRecords populates the kv store with one global record for the temp // store, and one for the perm store. 
func globalRecords(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + boltDB *BoltDB, sessionStore session.Store) *expectedResult { return insertTestKVRecords( t, ctx, boltDB, sessionStore, true, fn.None[string](), @@ -302,7 +323,7 @@ func globalRecords(t *testing.T, ctx context.Context, // record for the local temp store, and one session specific record for the perm // local store. func sessionSpecificRecords(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + boltDB *BoltDB, sessionStore session.Store) *expectedResult { return insertTestKVRecords( t, ctx, boltDB, sessionStore, false, fn.None[string](), @@ -313,7 +334,7 @@ func sessionSpecificRecords(t *testing.T, ctx context.Context, // record for the local temp store, and one feature specific record for the perm // local store. func featureSpecificRecords(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + boltDB *BoltDB, sessionStore session.Store) *expectedResult { return insertTestKVRecords( t, ctx, boltDB, sessionStore, false, fn.Some("test-feature"), @@ -324,13 +345,25 @@ func featureSpecificRecords(t *testing.T, ctx context.Context, // by utilizing all the other helper functions that populates the kvstores at // different levels. func recordsAtAllLevels(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + boltDB *BoltDB, sessionStore session.Store) *expectedResult { + + gRecords := globalRecords( + t, ctx, boltDB, sessionStore, + ).kvRecords.UnwrapOrFail(t) - gRecords := globalRecords(t, ctx, boltDB, sessionStore) - sRecords := sessionSpecificRecords(t, ctx, boltDB, sessionStore) - fRecords := featureSpecificRecords(t, ctx, boltDB, sessionStore) + sRecords := sessionSpecificRecords( + t, ctx, boltDB, sessionStore, + ).kvRecords.UnwrapOrFail(t) - return append(gRecords, append(sRecords, fRecords...)...) 
+ fRecords := featureSpecificRecords( + t, ctx, boltDB, sessionStore, + ).kvRecords.UnwrapOrFail(t) + + allRecords := append(gRecords, append(sRecords, fRecords...)...) + + return &expectedResult{ + kvRecords: fn.Some(allRecords), + } } // insertTestKVRecords populates the kv store with one record for the local temp @@ -341,7 +374,7 @@ func recordsAtAllLevels(t *testing.T, ctx context.Context, // ruleName, entryKey and entryVal. func insertTestKVRecords(t *testing.T, ctx context.Context, boltDB *BoltDB, sessionStore session.Store, global bool, - featureNameOpt fn.Option[string]) []kvStoreRecord { + featureNameOpt fn.Option[string]) *expectedResult { var ( ruleName = "test-rule" @@ -380,7 +413,9 @@ func insertTestKVRecords(t *testing.T, ctx context.Context, insertKvRecord(t, ctx, boltDB, permKvRecord) - return []kvStoreRecord{tempKvRecord, permKvRecord} + return &expectedResult{ + kvRecords: fn.Some([]kvStoreRecord{tempKvRecord, permKvRecord}), + } } // insertTestKVRecords populates the kv store with passed record, and asserts @@ -431,7 +466,7 @@ func insertKvRecord(t *testing.T, ctx context.Context, // across all possible combinations of different levels of records in the kv // store. All values and different bucket names are randomly generated. func randomKVRecords(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) []kvStoreRecord { + boltDB *BoltDB, sessionStore session.Store) *expectedResult { var ( // We set the number of records to insert to 1000, as that @@ -522,7 +557,9 @@ func randomKVRecords(t *testing.T, ctx context.Context, insertedRecords = append(insertedRecords, kvEntry) } - return insertedRecords + return &expectedResult{ + kvRecords: fn.Some(insertedRecords), + } } // randomString generates a random string of the passed length n. 
From bbe2b907943066cbd064c3367c8ea476fbb4c4c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= Date: Tue, 17 Jun 2025 17:35:21 +0200 Subject: [PATCH 9/9] firewalldb: add privacy mapper SQL migration This commit introduces the migration logic for transitioning the privacy mapper store from kvdb to SQL. Note that as of this commit, the migration is not yet triggered by any production code, i.e. only tests execute the migration logic. --- firewalldb/sql_migration.go | 309 ++++++++++++++++++++++++++++++- firewalldb/sql_migration_test.go | 187 ++++++++++++++++++- 2 files changed, 492 insertions(+), 4 deletions(-) diff --git a/firewalldb/sql_migration.go b/firewalldb/sql_migration.go index 092b61c8e..e8ab2adaf 100644 --- a/firewalldb/sql_migration.go +++ b/firewalldb/sql_migration.go @@ -15,6 +15,11 @@ import ( // line length in the migration code. type kvParams = sqlc.InsertKVStoreRecordParams +// privacyPairs is a type alias for a map that holds the privacy pairs, where +// the outer key is the group ID, and the value is a map of real to pseudo +// values. +type privacyPairs = map[int64]map[string]string + // MigrateFirewallDBToSQL runs the migration of the firwalldb stores from the // bbolt database to a SQL database. The migration is done in a single // transaction to ensure that all rows in the stores are migrated or none at @@ -35,10 +40,14 @@ func MigrateFirewallDBToSQL(ctx context.Context, kvStore *bbolt.DB, return err } + err = migratePrivacyMapperDBToSQL(ctx, kvStore, tx) + if err != nil { + return err + } + log.Infof("The rules DB has been migrated from KV to SQL.") - // TODO(viktor): Add migration for the privacy mapper and the action - // stores. + // TODO(viktor): Add migration for the action stores. 
return nil } @@ -484,3 +493,299 @@ func processFeatureNameBucket(ctx context.Context, sqlTx SQLQueries, perm bool, return nil }) } + +func migratePrivacyMapperDBToSQL(ctx context.Context, kvStore *bbolt.DB, + sqlTx SQLQueries) error { + + log.Infof("Starting migration of the privacy mapper store to SQL") + + // 1) Collect all privacy pairs from the KV store. + privPairs, err := collectPrivacyPairs(ctx, kvStore, sqlTx) + if err != nil { + return fmt.Errorf("error migrating privacy mapper store: %w", + err) + } + + // 2) Insert all collected privacy pairs into the SQL database. + err = insertPrivacyPairs(ctx, sqlTx, privPairs) + if err != nil { + return fmt.Errorf("insertion of privacy pairs failed: %w", err) + } + + // 3) Validate that all inserted privacy pairs match the original values + // in the KV store. Note that this is done after all values have been + // inserted, to ensure that the migration doesn't overwrite any values + // after they were inserted. + err = validatePrivacyPairsMigration(ctx, sqlTx, privPairs) + if err != nil { + return fmt.Errorf("migration validation of privacy pairs "+ + "failed: %w", err) + } + + log.Infof("Migration of the privacy mapper stores to SQL completed. "+ + "Total number of rows migrated: %d", len(privPairs)) + return nil +} + +// collectPrivacyPairs collects all privacy pairs from the KV store and +// returns them as the privacyPairs type alias. +func collectPrivacyPairs(ctx context.Context, kvStore *bbolt.DB, + sqlTx SQLQueries) (privacyPairs, error) { + + groupPairs := make(privacyPairs) + + return groupPairs, kvStore.View(func(kvTx *bbolt.Tx) error { + bkt := kvTx.Bucket(privacyBucketKey) + if bkt == nil { + // If we haven't generated any privacy bucket yet, + // we can skip the migration, as there are no privacy + // pairs to migrate. 
+ return nil + } + + return bkt.ForEach(func(groupId, v []byte) error { + if v != nil { + return fmt.Errorf("expected only buckets "+ + "under %s bkt, but found value %s", + privacyBucketKey, v) + } + + gBkt := bkt.Bucket(groupId) + if gBkt == nil { + return fmt.Errorf("group bkt for group id "+ + "%s not found", groupId) + } + + groupSqlId, err := sqlTx.GetSessionIDByAlias( + ctx, groupId, + ) + if errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("session with group id %x "+ + "not found in sql db", groupId) + } else if err != nil { + return err + } + + groupRealToPseudoPairs, err := collectGroupPairs(gBkt) + if err != nil { + return fmt.Errorf("processing group bkt "+ + "for group id %s (sqlID %d) failed: %w", + groupId, groupSqlId, err) + } + + groupPairs[groupSqlId] = groupRealToPseudoPairs + + return nil + }) + }) +} + +// collectGroupPairs collects all privacy pairs for a specific session group, +// i.e. the group buckets under the privacy mapper bucket in the KV store. +// The function returns them as a map, where the key is the real value, and +// the value for the key is the pseudo values. +// It also checks that the pairs are consistent, i.e. that for each real value +// there is a corresponding pseudo value, and vice versa. If the pairs are +// inconsistent, it returns an error indicating the mismatch. 
+func collectGroupPairs(bkt *bbolt.Bucket) (map[string]string, error) { + var ( + realToPseudoRes map[string]string + pseudoToRealRes map[string]string + err error + missMatchErr = errors.New("privacy mapper pairs mismatch") + ) + + if realBkt := bkt.Bucket(realToPseudoKey); realBkt != nil { + realToPseudoRes, err = collectPairs(realBkt) + if err != nil { + return nil, fmt.Errorf("fetching real to pseudo pairs "+ + "failed: %w", err) + } + } else { + return nil, fmt.Errorf("%s bucket not found", realToPseudoKey) + } + + if pseudoBkt := bkt.Bucket(pseudoToRealKey); pseudoBkt != nil { + pseudoToRealRes, err = collectPairs(pseudoBkt) + if err != nil { + return nil, fmt.Errorf("fetching pseudo to real pairs "+ + "failed: %w", err) + } + } else { + return nil, fmt.Errorf("%s bucket not found", pseudoToRealKey) + } + + if len(realToPseudoRes) != len(pseudoToRealRes) { + return nil, missMatchErr + } + + for realVal, pseudoVal := range realToPseudoRes { + if rv, ok := pseudoToRealRes[pseudoVal]; !ok || rv != realVal { + return nil, missMatchErr + } + } + + return realToPseudoRes, nil +} + +// collectPairs collects all privacy pairs from a specific realToPseudoKey or +// pseudoToRealKey bucket in the KV store. It returns a map where the key is +// the real value or pseudo value, and the value is the corresponding pseudo +// value or real value, respectively (depending on if the realToPseudo or +// pseudoToReal bucket is passed to the function). 
+func collectPairs(pairsBucket *bbolt.Bucket) (map[string]string, error) { + pairsRes := make(map[string]string) + + return pairsRes, pairsBucket.ForEach(func(k, v []byte) error { + if v == nil { + return fmt.Errorf("expected only key-values under "+ + "pairs bucket, but found bucket %s", k) + } + + if len(v) == 0 { + return fmt.Errorf("empty value stored for privacy "+ + "pairs key %s", k) + } + + pairsRes[string(k)] = string(v) + + return nil + }) +} + +// insertPrivacyPairs inserts the collected privacy pairs into the SQL database. +func insertPrivacyPairs(ctx context.Context, sqlTx SQLQueries, + pairs privacyPairs) error { + + for groupId, groupPairs := range pairs { + err := insertGroupPairs(ctx, sqlTx, groupPairs, groupId) + if err != nil { + return fmt.Errorf("inserting group pairs for group "+ + "id %d failed: %w", groupId, err) + } + } + + return nil +} + +// insertGroupPairs inserts the privacy pairs for a specific group into +// the SQL database. It checks for duplicates before inserting, and returns +// an error if a duplicate pair is found. The function takes a map of real +// to pseudo values, where the key is the real value and the value is the +// corresponding pseudo value. 
+func insertGroupPairs(ctx context.Context, sqlTx SQLQueries, + pairs map[string]string, groupID int64) error { + + for realVal, pseudoVal := range pairs { + _, err := sqlTx.GetPseudoForReal( + ctx, sqlc.GetPseudoForRealParams{ + GroupID: groupID, + RealVal: realVal, + }, + ) + if err == nil { + return fmt.Errorf("duplicate privacy pair %s:%s: %w", + realVal, pseudoVal, ErrDuplicatePseudoValue) + } else if !errors.Is(err, sql.ErrNoRows) { + return err + } + + _, err = sqlTx.GetRealForPseudo( + ctx, sqlc.GetRealForPseudoParams{ + GroupID: groupID, + PseudoVal: pseudoVal, + }, + ) + if err == nil { + return fmt.Errorf("duplicate privacy pair %s:%s: %w", + realVal, pseudoVal, ErrDuplicatePseudoValue) + } else if !errors.Is(err, sql.ErrNoRows) { + return err + } + + err = sqlTx.InsertPrivacyPair( + ctx, sqlc.InsertPrivacyPairParams{ + GroupID: groupID, + RealVal: realVal, + PseudoVal: pseudoVal, + }, + ) + if err != nil { + return fmt.Errorf("inserting privacy pair %s:%s "+ + "failed: %w", realVal, pseudoVal, err) + } + } + + return nil +} + +// validatePrivacyPairsMigration validates that the migrated privacy pairs +// match the original values in the KV store. +func validatePrivacyPairsMigration(ctx context.Context, sqlTx SQLQueries, + pairs privacyPairs) error { + + for groupId, groupPairs := range pairs { + err := validateGroupPairsMigration( + ctx, sqlTx, groupPairs, groupId, + ) + if err != nil { + return fmt.Errorf("migration validation of privacy "+ + "pairs for group %d failed: %w", groupId, err) + } + } + + return nil +} + +// validateGroupPairsMigration validates that the migrated privacy pairs for +// a specific group match the original values in the KV store. It checks that +// for each real value, the pseudo value in the SQL database matches the +// original pseudo value, and vice versa. If any mismatch is found, it returns +// an error indicating the mismatch. 
+func validateGroupPairsMigration(ctx context.Context, sqlTx SQLQueries,
+	pairs map[string]string, groupID int64) error {
+
+	for realVal, pseudoVal := range pairs {
+		resPseudoVal, err := sqlTx.GetPseudoForReal(
+			ctx, sqlc.GetPseudoForRealParams{
+				GroupID: groupID,
+				RealVal: realVal,
+			},
+		)
+		if errors.Is(err, sql.ErrNoRows) {
+			return fmt.Errorf("migrated privacy pair %s:%s not "+
+				"found for real value", realVal, pseudoVal)
+		}
+		if err != nil {
+			return err
+		}
+
+		if resPseudoVal != pseudoVal {
+			return fmt.Errorf("pseudo value in db %s, does not "+
+				"match original value %s, for real value %s",
+				resPseudoVal, pseudoVal, realVal)
+		}
+
+		resRealVal, err := sqlTx.GetRealForPseudo(
+			ctx, sqlc.GetRealForPseudoParams{
+				GroupID:   groupID,
+				PseudoVal: pseudoVal,
+			},
+		)
+		if errors.Is(err, sql.ErrNoRows) {
+			return fmt.Errorf("migrated privacy pair %s:%s not "+
+				"found for pseudo value", realVal, pseudoVal)
+		}
+		if err != nil {
+			return err
+		}
+
+		if resRealVal != realVal {
+			return fmt.Errorf("real value in db %s, does not "+
+				"match original value %s, for pseudo value %s",
+				resRealVal, realVal, pseudoVal)
+		}
+	}
+
+	return nil
+}
diff --git a/firewalldb/sql_migration_test.go b/firewalldb/sql_migration_test.go
index 44c76b9a0..3aa9112ca 100644
--- a/firewalldb/sql_migration_test.go
+++ b/firewalldb/sql_migration_test.go
@@ -31,6 +31,7 @@ type kvStoreRecord struct {
 
 type expectedResult struct {
 	kvRecords fn.Option[[]kvStoreRecord]
+	privPairs fn.Option[privacyPairs]
 }
 
 // TestFirewallDBMigration tests the migration of firewalldb from a bolt
@@ -189,9 +190,32 @@ func TestFirewallDBMigration(t *testing.T) {
 		}
 	}
 
+	assertPrivacyMapperMigrationResults := func(t *testing.T,
+		sqlStore *SQLDB, privPairs privacyPairs) {
+
+		for groupID, groupPairs := range privPairs {
+			storePairs, err := sqlStore.GetAllPrivacyPairs(
+				ctx, groupID,
+			)
+			require.NoError(t, err)
+			require.Len(t, storePairs, len(groupPairs))
+
+			for _, storePair := range storePairs {
+				// Assert that the real value of the
+				// store pair is in the original pairs.
+				pseudo, ok := groupPairs[storePair.RealVal]
+				require.True(t, ok)
+
+				// Assert that the pseudo value matches
+				// the one in the store.
+				require.Equal(t, pseudo, storePair.PseudoVal)
+			}
+		}
+	}
+
 	// The assertMigrationResults function will currently assert that
-	// the migrated kv stores records in the SQLDB match the original kv
-	// stores records in the BoltDB.
+	// the migrated kv stores records and privacy pairs in the SQLDB match
+	// the original entries in the BoltDB.
 	assertMigrationResults := func(t *testing.T, sqlStore *SQLDB,
 		expRes *expectedResult) {
 
@@ -201,6 +225,13 @@ func TestFirewallDBMigration(t *testing.T) {
 		expRes.kvRecords.WhenSome(func(kvRecords []kvStoreRecord) {
 			assertKvStoreMigrationResults(t, sqlStore, kvRecords)
 		})
+
+		// If the expected result contains privacy pairs, then we
+		// assert that the privacy mapper migration results match
+		// the expected results.
+		expRes.privPairs.WhenSome(func(pairs privacyPairs) {
+			assertPrivacyMapperMigrationResults(t, sqlStore, pairs)
+		})
 	}
 
 	// The tests slice contains all the tests that we will run for the
@@ -223,10 +254,13 @@
 			// Don't populate the DB, and return empty kv
 			// records and privacy pairs.
+			pairsRes := make(privacyPairs)
+
 			return &expectedResult{
 				kvRecords: fn.Some(
 					[]kvStoreRecord{},
 				),
+				privPairs: fn.Some(pairsRes),
 			}
 		},
 	},
@@ -250,6 +284,22 @@
 			name:       "random kv records",
 			populateDB: randomKVRecords,
 		},
+		{
+			name:       "one session and privacy pair",
+			populateDB: oneSessionAndPrivPair,
+		},
+		{
+			name:       "multiple sessions with one privacy pair",
+			populateDB: multiSessionsOnePrivPairs,
+		},
+		{
+			name:       "multiple privacy pairs",
+			populateDB: multipleSessionsAndPrivacyPairs,
+		},
+		{
+			name:       "random privacy pairs",
+			populateDB: randomPrivacyPairs,
+		},
 	}
 
 	for _, test := range tests {
@@ -363,6 +413,7 @@ func recordsAtAllLevels(t *testing.T, ctx context.Context,
 
 	return &expectedResult{
 		kvRecords: fn.Some(allRecords),
+		privPairs: fn.None[privacyPairs](),
 	}
 }
 
@@ -415,6 +466,8 @@ func insertTestKVRecords(t *testing.T, ctx context.Context,
 
 	return &expectedResult{
 		kvRecords: fn.Some([]kvStoreRecord{tempKvRecord, permKvRecord}),
+		// No privacy pairs are inserted in this test.
+		privPairs: fn.None[privacyPairs](),
 	}
 }
 
@@ -559,6 +612,136 @@ func randomKVRecords(t *testing.T, ctx context.Context,
 
 	return &expectedResult{
 		kvRecords: fn.Some(insertedRecords),
+		// No privacy pairs are inserted in this test.
+		privPairs: fn.None[privacyPairs](),
+	}
+}
+
+// oneSessionAndPrivPair inserts 1 session with 1 privacy pair into the
+// boltDB.
+func oneSessionAndPrivPair(t *testing.T, ctx context.Context,
+	boltDB *BoltDB, sessionStore session.Store) *expectedResult {
+
+	return createPrivacyPairs(t, ctx, boltDB, sessionStore, 1, 1)
+}
+
+// multiSessionsOnePrivPairs inserts 1 session with 10 privacy pairs into the
+// boltDB. NOTE(review): the name does not match this behavior.
+func multiSessionsOnePrivPairs(t *testing.T, ctx context.Context,
+	boltDB *BoltDB, sessionStore session.Store) *expectedResult {
+
+	return createPrivacyPairs(t, ctx, boltDB, sessionStore, 1, 10)
+}
+
+// multipleSessionsAndPrivacyPairs inserts 5 sessions with 10 privacy pairs
+// per session into the boltDB.
+func multipleSessionsAndPrivacyPairs(t *testing.T, ctx context.Context,
+	boltDB *BoltDB, sessionStore session.Store) *expectedResult {
+
+	return createPrivacyPairs(t, ctx, boltDB, sessionStore, 5, 10)
+}
+
+// createPrivacyPairs is a helper function that creates a number of sessions
+// with a number of privacy pairs per session. It returns an expectedResult
+// struct that contains the expected privacy pairs and no kv records.
+func createPrivacyPairs(t *testing.T, ctx context.Context,
+	boltDB *BoltDB, sessionStore session.Store, numSessions int,
+	numPairsPerSession int) *expectedResult {
+
+	pairs := make(privacyPairs)
+
+	sessSQLStore, ok := sessionStore.(*session.SQLStore)
+	require.True(t, ok)
+
+	for i := 0; i < numSessions; i++ {
+		sess, err := sessionStore.NewSession(
+			ctx, fmt.Sprintf("session-%d", i),
+			session.Type(uint8(rand.Intn(5))),
+			time.Unix(1000, 0), randomString(rand.Intn(10)+1),
+		)
+		require.NoError(t, err)
+
+		groupID := sess.GroupID
+		sqlGroupID, err := sessSQLStore.GetSessionIDByAlias(
+			ctx, groupID[:],
+		)
+		require.NoError(t, err)
+
+		groupPairs := make(map[string]string)
+
+		for j := 0; j < numPairsPerSession; j++ {
+			realKey := fmt.Sprintf("real-%d-%d", i, j)
+			pseudoKey := fmt.Sprintf("pseudo-%d-%d", i, j)
+
+			f := func(ctx context.Context, tx PrivacyMapTx) error {
+				return tx.NewPair(ctx, realKey, pseudoKey)
+			}
+
+			err := boltDB.PrivacyDB(groupID).Update(ctx, f)
+			require.NoError(t, err)
+
+			groupPairs[realKey] = pseudoKey
+		}
+
+		pairs[sqlGroupID] = groupPairs
+	}
+
+	return &expectedResult{
+		kvRecords: fn.None[[]kvStoreRecord](),
+		privPairs: fn.Some(pairs),
+	}
+}
+
+// randomPrivacyPairs adds a random number of privacy pairs to 10 sessions.
+func randomPrivacyPairs(t *testing.T, ctx context.Context,
+	boltDB *BoltDB, sessionStore session.Store) *expectedResult {
+
+	numSessions := 10
+	maxPairsPerSession := 20
+	pairs := make(privacyPairs)
+
+	sessSQLStore, ok := sessionStore.(*session.SQLStore)
+	require.True(t, ok)
+
+	for i := 0; i < numSessions; i++ {
+		sess, err := sessionStore.NewSession(
+			ctx, fmt.Sprintf("rand-session-%d", i),
+			session.Type(uint8(rand.Intn(5))),
+			time.Unix(1000, 0), randomString(rand.Intn(10)+1),
+		)
+		require.NoError(t, err)
+
+		groupID := sess.GroupID
+		sqlGroupID, err := sessSQLStore.GetSessionIDByAlias(
+			ctx, groupID[:],
+		)
+		require.NoError(t, err)
+
+		numPairs := rand.Intn(maxPairsPerSession) + 1
+		groupPairs := make(map[string]string)
+
+		for j := 0; j < numPairs; j++ {
+			realKey := fmt.Sprintf("real-%s",
+				randomString(rand.Intn(10)+5))
+			pseudoKey := fmt.Sprintf("pseudo-%s",
+				randomString(rand.Intn(10)+5))
+
+			f := func(ctx context.Context, tx PrivacyMapTx) error {
+				return tx.NewPair(ctx, realKey, pseudoKey)
+			}
+
+			err := boltDB.PrivacyDB(groupID).Update(ctx, f)
+			require.NoError(t, err)
+
+			groupPairs[realKey] = pseudoKey
+		}
+
+		pairs[sqlGroupID] = groupPairs
+	}
+
+	return &expectedResult{
+		kvRecords: fn.None[[]kvStoreRecord](),
+		privPairs: fn.Some(pairs),
 	}
 }