1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

37 changes: 37 additions & 0 deletions crates/sui-auth-events-move/Move.toml
@@ -0,0 +1,37 @@
[package]
name = "auth_events"
edition = "2024.beta" # edition = "legacy" to use legacy (pre-2024) Move
# license = "" # e.g., "MIT", "GPL", "Apache 2.0"
# authors = ["..."] # e.g., ["Joe Smith (joesmith@noemail.com)", "John Snow (johnsnow@noemail.com)"]

[dependencies]
Sui = { git = "https://github.com/MystenLabs/sui.git", subdir = "crates/sui-framework/packages/sui-framework", rev = "framework/testnet" }

# For remote import, use the `{ git = "...", subdir = "...", rev = "..." }`.
# Revision can be a branch, a tag, and a commit hash.
# MyRemotePackage = { git = "https://some.remote/host.git", subdir = "remote/path", rev = "main" }

# For local dependencies use `local = path`. Path is relative to the package root
# Local = { local = "../path/to" }

# To resolve a version conflict and force a specific version for dependency
# override use `override = true`
# Override = { local = "../conflicting/version", override = true }

[addresses]
auth_events = "0x0"

# Named addresses will be accessible in Move as `@name`. They're also exported:
# for example, `std = "0x1"` is exported by the Standard Library.
# alice = "0xA11CE"

[dev-dependencies]
# The dev-dependencies section allows overriding dependencies for `--test` and
# `--dev` modes. You can introduce test-only dependencies here.
# Local = { local = "../path/to/dev-build" }

[dev-addresses]
# The dev-addresses section allows overwriting named addresses for the `--test`
# and `--dev` modes.
# alice = "0xB0B"

98 changes: 98 additions & 0 deletions crates/sui-auth-events-move/sources/auth_events.move
@@ -0,0 +1,98 @@
/// Module: auth_events
module auth_events::auth_events;

use std::hash;
use std::type_name;
use sui::bcs;
use std::ascii::String;

/// Error code for when the user has no access.
const ENoAccess: u64 = 0;

// If [e1, e2, e3] are events, digest = H(H(e3), H(H(e2), H(H(e1)))), most_recent_event = H(e3)
public struct StreamHead has key, store {
id: UID,
stream_id: String,
digest: vector<u8>,
most_recent_event_digest: vector<u8>,
count: u64,
}

// Creates a stream given a stream identifier.
public fun create_stream(ctx: &mut TxContext, stream_id: String): StreamHead {
StreamHead {
id: object::new(ctx),
stream_id: stream_id,
digest: vector::empty(),
most_recent_event_digest: vector::empty(),
count: 0,
}
}

public fun hash_two(lhs: vector<u8>, rhs: vector<u8>): vector<u8> {
let mut inputs = lhs;
inputs.append(rhs);
hash::sha3_256(inputs)
}

public fun add_to_stream<T: copy + drop>(
event: T,
stream_head: &mut StreamHead
) {
// This check effectively acts as an access control because the events defined in a module
// can only be instantiated by that module.
// Note that we could support dynamic or runtime-defined streams by instead doing access control with a capability object.
assert!(stream_head.stream_id == type_name::into_string(type_name::get<T>()), ENoAccess);

stream_head.count = stream_head.count + 1;
stream_head.most_recent_event_digest = hash::sha3_256(bcs::to_bytes(&event));
stream_head.digest = hash_two(stream_head.most_recent_event_digest, stream_head.digest);
}
Comment on lines +12 to +50
You could have StreamHead accept a phantom type parameter to leverage the type system to do the stream_id check for you.

Suggested change
// If [e1, e2, e3] are events, digest = H(H(e3), H(H(e2), H(H(e1)))), most_recent_event = H(e3)
public struct StreamHead<phantom T: copy + drop> has key, store {
id: UID,
digest: vector<u8>,
most_recent_event_digest: vector<u8>,
count: u64,
}
// Creates a stream given a stream identifier.
public fun create_stream<T: copy + drop>(ctx: &mut TxContext): StreamHead<T> {
StreamHead {
id: object::new(ctx),
digest: vector::empty(),
most_recent_event_digest: vector::empty(),
count: 0,
}
}
public fun hash_two(lhs: vector<u8>, rhs: vector<u8>): vector<u8> {
let mut inputs = lhs;
inputs.append(rhs);
hash::sha3_256(inputs)
}
public fun add_to_stream<T: copy + drop>(
event: T,
stream_head: &mut StreamHead<T>
) {


// ------------------------------------------------------------
// Testing functions

use std::ascii;

public struct TestEvent has copy, drop {
color: u64,
}

#[test]
fun test_add_to_stream() {
let mut ctx = tx_context::dummy();
let type_name = type_name::into_string(type_name::get<TestEvent>());
assert!(type_name == ascii::string(b"0000000000000000000000000000000000000000000000000000000000000000::auth_events::TestEvent"));

let mut stream_head = create_stream(&mut ctx, type_name);
assert!(stream_head.stream_id == type_name);
assert!(stream_head.count == 0);
assert!(stream_head.digest == vector::empty());
assert!(stream_head.most_recent_event_digest == vector::empty());
let mut current_digest = stream_head.digest;

let mut num_events: u64 = 0;
while (num_events < 100) {
let test_event = TestEvent {
color: num_events,
};
add_to_stream(test_event, &mut stream_head);
num_events = num_events + 1;

assert!(stream_head.count == num_events);
assert!(stream_head.most_recent_event_digest == hash::sha3_256(bcs::to_bytes(&test_event)));
current_digest = hash_two(stream_head.most_recent_event_digest, current_digest);
assert!(stream_head.digest == current_digest);
};

transfer::public_share_object(stream_head);
}

#[test, expected_failure(abort_code = ENoAccess)]
fun test_add_to_stream_no_access() {
let mut ctx = tx_context::dummy();
let type_name = ascii::string(b"AnotherEvent");
let mut stream_head = create_stream(&mut ctx, type_name);
add_to_stream(TestEvent { color: 1 }, &mut stream_head);
transfer::public_share_object(stream_head);
}
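
The digest maintained above is a simple left-folded hash chain over BCS-encoded events, so it can be recomputed off-chain. The Rust sketch below is an illustration only, not part of this PR: it assumes the verifier already has the BCS bytes of every event in stream order, uses the `sha3` crate that this PR adds to `sui-light-client`, and the function names are hypothetical.

```rust
use sha3::{Digest, Sha3_256};

// Equivalent of the Move `hash_two`: SHA3-256 over the concatenation lhs || rhs.
fn hash_two(lhs: &[u8], rhs: &[u8]) -> Vec<u8> {
    let mut hasher = Sha3_256::new();
    hasher.update(lhs);
    hasher.update(rhs);
    hasher.finalize().to_vec()
}

// Recompute the expected StreamHead digest from the BCS bytes of each event,
// starting from the empty digest, exactly as `add_to_stream` folds them on-chain.
fn recompute_stream_digest(event_bcs_bytes: &[Vec<u8>]) -> Vec<u8> {
    let mut digest: Vec<u8> = Vec::new(); // matches vector::empty() in `create_stream`
    for event in event_bcs_bytes {
        let event_digest = Sha3_256::digest(event).to_vec();
        digest = hash_two(&event_digest, &digest);
    }
    digest
}
```

A verifier that trusts the on-chain `StreamHead` (for example, via the light client) can compare this value against `stream_head.digest` to confirm it has seen the complete event history.
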
43 changes: 43 additions & 0 deletions crates/sui-auth-events-move/sources/capy.move
@@ -0,0 +1,43 @@
module auth_events::capy;

use sui::event;
use auth_events::auth_events::{Self, StreamHead};
use std::type_name;

public struct Capy has key, store {
id: UID,
color: u8,
}

public struct CapyBorn has copy, drop {
id: ID,
color: u8,
}

fun init(ctx: &mut TxContext) {
let stream_id = type_name::into_string(type_name::get<CapyBorn>());
let stream_head = auth_events::create_stream(ctx, stream_id);
transfer::public_share_object(stream_head);
}

public entry fun new(color: u8, stream_head: &mut StreamHead, ctx: &mut TxContext) {
let capy = Capy {
id: object::new(ctx),
color: color,
};
emit_auth_event(CapyBorn {
id: object::id(&capy),
color: capy.color,
}, stream_head);
transfer::transfer(capy, tx_context::sender(ctx));
}

// TODO: Understand why using a generic is failing
public fun emit_auth_event(event: CapyBorn, stream_head: &mut StreamHead) {
auth_events::add_to_stream(event, stream_head);
event::emit(event);
}

// Example invocation (assumes $PKG is the published package ID and
// $STREAM_HEAD_ID is the shared StreamHead object created by `init`, which
// `new` takes as its second argument):
// sui client ptb \
// --move-call "$PKG::capy::new" 3 @$STREAM_HEAD_ID \
// --gas-budget 10000000
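
Since `add_to_stream` stores `sha3_256(bcs::to_bytes(&event))` as `most_recent_event_digest`, an off-chain consumer that obtains the BCS bytes of an emitted `CapyBorn` event can cheaply check that it is the latest event in the stream. A minimal Rust sketch, illustrative only (the function name is hypothetical; the `sha3` crate added to `sui-light-client` in this PR is assumed):

```rust
use sha3::{Digest, Sha3_256};

// Returns true if the event's BCS bytes hash to the StreamHead's
// `most_recent_event_digest`, i.e. the event is the newest one in the stream.
fn is_most_recent_event(event_bcs: &[u8], most_recent_event_digest: &[u8]) -> bool {
    Sha3_256::digest(event_bcs).as_slice() == most_recent_event_digest
}
```
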
1 change: 1 addition & 0 deletions crates/sui-light-client/Cargo.toml
@@ -36,3 +36,4 @@ reqwest.workspace = true
object_store.workspace = true
env_logger = "0.11.5"
log = "0.4.22"
sha3 = "0.10.6"
20 changes: 14 additions & 6 deletions crates/sui-light-client/README.md
@@ -20,15 +20,17 @@ The light client requires a config file and a directory to cache checkpoints, an

## Setup

The config file for the light client takes a URL for a full node, a directory (that must exist) and within the directory to name of the genesis blob for the Sui network.
The config file for the light client takes a URL for a full node, a directory to store checkpoint summaries (which must exist), and, within that directory, the name of the genesis blob for the Sui network.

```
full_node_url: "http://ord-mnt-rpcbig-06.mainnet.sui.io:9000"
full_node_url: "https://fullnode.mainnet.sui.io:443"
checkpoint_summary_dir: "checkpoints_dir"
genesis_filename: "genesis.blob"
object_store_url: "https://checkpoints.mainnet.sui.io"
graphql_url: "https://sui-mainnet.mystenlabs.com/graphql"
```

The genesis blob for the Sui mainnet can be found here: https://github.com/MystenLabs/sui-genesis/blob/main/mainnet/genesis.blob
The genesis blob for the Sui mainnet can be found here: https://github.com/MystenLabs/sui-genesis/blob/main/mainnet/genesis.blob. Download and place it inside the checkpoint summary directory.

## Sync

@@ -41,6 +43,8 @@ Where `light_client.yaml` is the config file above.

This command will download all end-of-epoch checkpoints, and check them for validity. They will be cached within the checkpoint summary directory for use by future invocations.

Internally, sync works in two steps. It first downloads the end-of-epoch checkpoint numbers into the `checkpoints.yaml` file (which needs to be present in the checkpoint summaries directory). Next, it downloads the corresponding checkpoint summaries.

## Check Transaction

To check that a transaction was executed, as well as the events it emitted, do:
@@ -55,8 +59,12 @@ Where the base58 encoding of the transaction ID is specified. If the transaction
To check an object provide its ID in the following way:

```
$ sui-light-client --config light_client.yaml object -o 0xc646887891adfc0540ec271fd0203603fb4c841a119ec1e00c469441
abfc7078
$ sui-light-client --config light_client.yaml object -o 0xa514c85e1844189a54f4bfabc0928cbcac2137b928bef61adade84bbb486fd1f
```

The object ID is represented in Hex as displayed in explorers. If the object exists in the latest state it is printed out in JSON, otherwise an error is printed.
The object ID is represented in Hex as displayed in explorers. If the object exists in the latest state it is printed out in JSON, otherwise an error is printed.

## Known issues

- Throws an error if the `checkpoints.yaml` file is not there. We can probably fix this for situations where someone is starting from scratch.
- Throw a better error if `genesis.blob` is not found.