Skip to content

Nostr sqldb #835

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 14 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
662 changes: 608 additions & 54 deletions Cargo.lock

Large diffs are not rendered by default.

45 changes: 45 additions & 0 deletions crates/nostr-sqldb/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
[package]
name = "nostr-sqldb"
version = "0.41.0"
edition = "2021"
description = "SQL storage backend for Nostr apps"
authors.workspace = true
homepage.workspace = true
repository.workspace = true
license.workspace = true
readme = "README.md"
rust-version.workspace = true
keywords = ["nostr", "database", "postgres", "mysql", "sqlite"]

[dependencies]
nostr = { workspace = true, features = ["std"] }
nostr-database = { workspace = true, features = ["flatbuf"] }
tracing.workspace = true
diesel = { version = "2", features = ["serde_json"] }
diesel-async = { version = "0.5", features = ["deadpool"] }
diesel_migrations = { version = "2" }
deadpool = { version = "0.12", features = ["managed", "rt_tokio_1"] }

[features]
default = ["postgres"]
postgres = [
"diesel/postgres",
"diesel-async/postgres",
"diesel_migrations/postgres",
]

mysql = ["diesel/mysql", "diesel-async/mysql", "diesel_migrations/mysql"]
sqlite = [
"diesel/sqlite",
"diesel-async/sqlite",
"diesel_migrations/sqlite",
"diesel/returning_clauses_for_sqlite_3_35",
]

[dev-dependencies]
tokio.workspace = true
nostr-relay-builder = { workspace = true }
tracing-subscriber = { workspace = true }

[[example]]
name = "postgres-relay"
# The example uses NostrPostgres, which only exists behind the `postgres`
# feature; gate it so e.g. `--no-default-features --features sqlite` builds.
required-features = ["postgres"]
19 changes: 19 additions & 0 deletions crates/nostr-sqldb/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Nostr SQL database backend

SQL storage backend for nostr apps working with Postgres, SQLite and MySQL.

## State

**This library is in an ALPHA state**, things that are implemented generally
work but the API will change in breaking ways.

## Donations

`rust-nostr` is free and open-source. This means we do not earn any revenue by
selling it. Instead, we rely on your financial support. If you actively use any
of the `rust-nostr` libs/software/services, then please [donate](https://rust-nostr.org/donate).

## License

This project is distributed under the MIT software license - see the
[LICENSE](../../LICENSE) file for details.
31 changes: 31 additions & 0 deletions crates/nostr-sqldb/examples/postgres-relay.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
// Copyright (c) 2025 Protom
// Distributed under the MIT software license

use std::time::Duration;

use nostr_database::prelude::*;
use nostr_relay_builder::prelude::*;
use nostr_sqldb::NostrPostgres;

// Your database URL
const DB_URL: &str = "postgres://postgres:password@localhost:5432";

#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    // Open the database; this also applies any pending migrations.
    let database = NostrPostgres::new(DB_URL).await?;

    // Serve the database through a local relay.
    let relay = LocalRelay::run(RelayBuilder::default().database(database)).await?;
    println!("Url: {}", relay.url());

    // Park the main task forever so the relay keeps serving.
    loop {
        tokio::time::sleep(Duration::from_secs(60)).await;
    }
}
Empty file.
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
-- IF EXISTS makes the rollback idempotent; drop the child table first so the
-- foreign key from event_tags.event_id to events.id is never violated.
DROP TABLE IF EXISTS event_tags;
DROP TABLE IF EXISTS events;
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
-- The actual event data
-- NOTE(review): id/pubkey are stored as 64-char hex strings here, while the
-- postgres schema stores raw bytes (BYTEA) — confirm whether the backends are
-- meant to diverge, or align them before the MySQL impl lands.
CREATE TABLE IF NOT EXISTS events (
id VARCHAR(64) PRIMARY KEY,
pubkey VARCHAR(64) NOT NULL,
created_at BIGINT NOT NULL,
kind BIGINT NOT NULL,
payload BLOB NOT NULL,
deleted BOOLEAN NOT NULL
);

-- Direct indexes
-- NOTE(review): tables above are guarded with IF NOT EXISTS but these index
-- statements are not (MySQL has no CREATE INDEX IF NOT EXISTS), so a partial
-- re-run would fail here — verify the intended idempotency story.
CREATE INDEX event_pubkey ON events (pubkey);
CREATE INDEX event_date ON events (created_at);
CREATE INDEX event_kind ON events (kind);
CREATE INDEX event_deleted ON events (deleted);

-- The tag index, the primary will give us the index automatically
CREATE TABLE IF NOT EXISTS event_tags (
tag VARCHAR(64) NOT NULL,
tag_value VARCHAR(512) NOT NULL,
event_id VARCHAR(64) NOT NULL
REFERENCES events (id)
ON DELETE CASCADE
ON UPDATE CASCADE,
PRIMARY KEY (tag, tag_value, event_id)
);
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.

-- Removes the helper functions installed by the matching up.sql.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.

-- Standard Diesel bookkeeping for PostgreSQL; left as generated.


-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
-- IF EXISTS makes the rollback idempotent (and consistent with the MySQL
-- down migration); drop the child table first to satisfy the foreign key.
DROP TABLE IF EXISTS event_tags;
DROP TABLE IF EXISTS events;
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
-- The actual event data
-- Per the PR discussion, id/pubkey/payload are stored as raw bytes (BYTEA)
-- rather than hex strings; the signature column was dropped.
CREATE TABLE events (
id BYTEA PRIMARY KEY,
pubkey BYTEA NOT NULL,
created_at BIGINT NOT NULL,
kind BIGINT NOT NULL,
payload BYTEA NOT NULL,
deleted BOOLEAN NOT NULL
);

-- Direct indexes
CREATE INDEX event_pubkey ON events (pubkey);
CREATE INDEX event_date ON events (created_at);
CREATE INDEX event_kind ON events (kind);
CREATE INDEX event_deleted ON events (deleted);

-- The tag index, the primary will give us the index automatically
-- Rows cascade away when the referenced event is deleted or its id changes.
CREATE TABLE event_tags (
tag TEXT NOT NULL,
tag_value TEXT NOT NULL,
event_id BYTEA NOT NULL
REFERENCES events (id)
ON DELETE CASCADE
ON UPDATE CASCADE,
PRIMARY KEY (tag, tag_value, event_id)
);
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
-- IF EXISTS makes the rollback idempotent (and consistent with the MySQL
-- down migration); drop the child table first to satisfy the foreign key.
DROP TABLE IF EXISTS event_tags;
DROP TABLE IF EXISTS events;
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
-- The actual event data
CREATE TABLE events (
id VARCHAR(64) PRIMARY KEY,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would store the 32-byte array here, instead of the hex. The same for the pubkey and signature (64-byte array), also in the other schemas. What do you think?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'll have to check whether there is support for binary indexes in all dbs and if there are any performance penalties. At first look it seems to be doable. Debugging the data (with other sql clients) will get a bit harder though.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Regarding the debugging in SQL clients: I know some clients display the bytes as hex, but not sure if all. I've tried DBeaver and JetBrains Database tool, and both display the event ID and public key BLOBs as hex.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes at least PG is storing small binaries as hex values anyhow. I have dropped the signature column and set the column types to be binary. Testing with PG seems to have no issues for the other backends I'll have to check when I add the impls.

pubkey VARCHAR(64) NOT NULL,
created_at BIGINT NOT NULL,
kind BIGINT NOT NULL,
payload BLOB NOT NULL,
deleted BOOLEAN NOT NULL
);

-- Direct indexes
CREATE INDEX event_pubkey ON events (pubkey);
CREATE INDEX event_date ON events (created_at);
CREATE INDEX event_kind ON events (kind);
CREATE INDEX event_deleted ON events (deleted);

-- The tag index, the primary will give us the index automatically
CREATE TABLE event_tags (
tag TEXT NOT NULL,
tag_value TEXT NOT NULL,
event_id VARCHAR(64) NOT NULL
REFERENCES events (id)
ON DELETE CASCADE
ON UPDATE CASCADE,
PRIMARY KEY (tag, tag_value, event_id)
);
9 changes: 9 additions & 0 deletions crates/nostr-sqldb/mysql.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# For documentation on how to configure this file,
# see https://diesel.rs/guides/configuring-diesel-cli

# Diesel CLI configuration for the MySQL backend.

[print_schema]
file = "src/schema/mysql.rs"
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]

[migrations_directory]
dir = "migrations/mysql"
9 changes: 9 additions & 0 deletions crates/nostr-sqldb/postgres.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# For documentation on how to configure this file,
# see https://diesel.rs/guides/configuring-diesel-cli

# Diesel CLI configuration for the PostgreSQL backend.

[print_schema]
file = "src/schema/postgres.rs"
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]

[migrations_directory]
dir = "migrations/postgres"
9 changes: 9 additions & 0 deletions crates/nostr-sqldb/sqlite.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# For documentation on how to configure this file,
# see https://diesel.rs/guides/configuring-diesel-cli

# Diesel CLI configuration for the SQLite backend.

[print_schema]
file = "src/schema/sqlite.rs"
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]

[migrations_directory]
dir = "migrations/sqlite"
16 changes: 16 additions & 0 deletions crates/nostr-sqldb/src/lib.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
mod migrations;
mod model;
mod query;
mod schema;

#[cfg(feature = "postgres")]
mod postgres;

// Backend-specific migration runners under distinct names so that several
// backend features can be enabled at the same time without the re-exports
// colliding.
#[cfg(feature = "mysql")]
pub use migrations::mysql::run_migrations as run_mysql_migrations;
#[cfg(feature = "postgres")]
pub use migrations::postgres::run_migrations as run_postgres_migrations;
#[cfg(feature = "sqlite")]
pub use migrations::sqlite::run_migrations as run_sqlite_migrations;

// Backward-compatible unqualified `run_migrations`: exported only when
// exactly one backend feature is enabled. Previously all three re-exports
// shared this name, so enabling two backends (e.g. default `postgres` plus
// `sqlite`) failed to compile with a duplicate-definition error.
#[cfg(all(feature = "mysql", not(feature = "postgres"), not(feature = "sqlite")))]
pub use migrations::mysql::run_migrations;
#[cfg(all(feature = "postgres", not(feature = "mysql"), not(feature = "sqlite")))]
pub use migrations::postgres::run_migrations;
#[cfg(all(feature = "sqlite", not(feature = "mysql"), not(feature = "postgres")))]
pub use migrations::sqlite::run_migrations;

#[cfg(feature = "postgres")]
pub use postgres::{postgres_connection_pool, NostrPostgres};
8 changes: 8 additions & 0 deletions crates/nostr-sqldb/src/migrations/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
// Backend-specific migration runners. Each module embeds its own
// `migrations/<backend>` directory at compile time and is only built when
// the matching Cargo feature is enabled.
#[cfg(feature = "mysql")]
pub mod mysql;

#[cfg(feature = "postgres")]
pub mod postgres;

#[cfg(feature = "sqlite")]
pub mod sqlite;
19 changes: 19 additions & 0 deletions crates/nostr-sqldb/src/migrations/mysql.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
use diesel::{Connection, MysqlConnection};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use nostr_database::DatabaseError;
use tracing::info;

/// MySQL migrations embedded into the binary at compile time.
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations/mysql");

/// Programmatically run any pending database migrations.
///
/// Opens a synchronous connection to the MySQL database at
/// `connection_string` and applies the embedded migrations that have not
/// been run yet.
///
/// # Errors
///
/// Returns a [`DatabaseError`] if the connection cannot be established or a
/// migration fails to apply.
pub fn run_migrations(connection_string: &str) -> Result<(), DatabaseError> {
    info!("Running db migrations in mysql database");
    let mut connection =
        MysqlConnection::establish(connection_string).map_err(DatabaseError::backend)?;

    let applied = connection
        .run_pending_migrations(MIGRATIONS)
        .map_err(DatabaseError::Backend)?;
    info!("Successfully executed mysql db migrations {:?}", applied);
    Ok(())
}
19 changes: 19 additions & 0 deletions crates/nostr-sqldb/src/migrations/postgres.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
use diesel::{Connection, PgConnection};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use nostr_database::DatabaseError;
use tracing::info;

/// PostgreSQL migrations embedded into the binary at compile time.
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations/postgres");

/// Programmatically run any pending database migrations.
///
/// Opens a synchronous connection to the PostgreSQL database at
/// `connection_string` and applies the embedded migrations that have not
/// been run yet.
///
/// # Errors
///
/// Returns a [`DatabaseError`] if the connection cannot be established or a
/// migration fails to apply.
pub fn run_migrations(connection_string: &str) -> Result<(), DatabaseError> {
    info!("Running db migrations in postgres database");
    let mut connection =
        PgConnection::establish(connection_string).map_err(DatabaseError::backend)?;

    let applied = connection
        .run_pending_migrations(MIGRATIONS)
        .map_err(DatabaseError::Backend)?;
    info!("Successfully executed postgres db migrations {:?}", applied);
    Ok(())
}
19 changes: 19 additions & 0 deletions crates/nostr-sqldb/src/migrations/sqlite.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
use diesel::{Connection, SqliteConnection};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use nostr_database::DatabaseError;
use tracing::info;

/// SQLite migrations embedded into the binary at compile time.
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations/sqlite");

/// Programmatically run any pending database migrations.
///
/// Opens a synchronous connection to the SQLite database at
/// `connection_string` and applies the embedded migrations that have not
/// been run yet.
///
/// # Errors
///
/// Returns a [`DatabaseError`] if the connection cannot be established or a
/// migration fails to apply.
pub fn run_migrations(connection_string: &str) -> Result<(), DatabaseError> {
    info!("Running db migrations in sqlite database");
    let mut connection =
        SqliteConnection::establish(connection_string).map_err(DatabaseError::backend)?;

    let applied = connection
        .run_pending_migrations(MIGRATIONS)
        .map_err(DatabaseError::Backend)?;
    info!("Successfully executed sqlite db migrations {:?}", applied);
    Ok(())
}
Loading