Batch attestation slashibility checking #6219

Open

wants to merge 29 commits into base: unstable from batch-attestation-slashibility-checking
Commits (29)
b5ac2a6 refactor to separate attn signing and db slashibility checks (eserilev, Aug 4, 2024)
a7fe031 adding comments, a few logs, some more TODO's (eserilev, Aug 4, 2024)
ee691f9 only download attn data once and mutate index when required (eserilev, Aug 5, 2024)
14cbae8 batch db txn (eserilev, Aug 5, 2024)
9b1302a fix some tests, linting, logs (eserilev, Aug 6, 2024)
57bd693 remove unneeded clones (eserilev, Aug 7, 2024)
e4d5e79 Merge branch 'unstable' of https://github.com/sigp/lighthouse into ba… (eserilev, Aug 7, 2024)
73f1d55 linnt (eserilev, Aug 7, 2024)
d07564c fmt (eserilev, Aug 7, 2024)
420ce57 working on test fixes (eserilev, Aug 9, 2024)
e9112b1 fix test (eserilev, Aug 12, 2024)
d7a023f Merge branch 'unstable' of https://github.com/sigp/lighthouse into ba… (eserilev, Aug 12, 2024)
ecf42f0 prevent db commit when slashing is not avail (eserilev, Aug 13, 2024)
b469d65 add more granular metrics (eserilev, Aug 15, 2024)
cdf219b fix test (eserilev, Aug 23, 2024)
9140a6d Merge branch 'unstable' into batch-attestation-slashibility-checking (eserilev, Sep 22, 2024)
b3b818f resolve merge conflicts (eserilev, Oct 3, 2024)
4715750 Resolve merge conflicts (eserilev, Oct 3, 2024)
509d926 merge conflicts (eserilev, Nov 21, 2024)
f5dacb6 remove unused import (eserilev, Nov 22, 2024)
98b2f7c fix (eserilev, Nov 23, 2024)
67c6f3a retry (eserilev, Nov 23, 2024)
c5c6c5b merge conflicts (eserilev, Apr 4, 2025)
6ece2be fix test (eserilev, Apr 4, 2025)
dece5b6 optimize publish aggs (eserilev, Apr 4, 2025)
d9e27c6 fix lint (eserilev, Apr 4, 2025)
a463e8d fmt (eserilev, Apr 4, 2025)
f35a028 Merge branch 'unstable' of https://github.com/sigp/lighthouse into ba… (eserilev, Apr 4, 2025)
d8d6da9 linting (eserilev, Apr 4, 2025)
Files changed
4 changes: 2 additions & 2 deletions beacon_node/http_api/tests/fork_tests.rs
@@ -152,11 +152,11 @@ async fn attestations_across_fork_with_skip_slots() {
assert!(!unaggregated_attestations.is_empty());
let fork_name = harness.spec.fork_name_at_slot::<E>(fork_slot);
client
- .post_beacon_pool_attestations_v1(&unaggregated_attestations)
+ .post_beacon_pool_attestations_v1(&unaggregated_attestations.iter().collect::<Vec<_>>())
.await
.unwrap();
client
- .post_beacon_pool_attestations_v2(&unaggregated_attestations, fork_name)
+ .post_beacon_pool_attestations_v2(&unaggregated_attestations.iter().collect::<Vec<_>>(), fork_name)
.await
.unwrap();

2 changes: 1 addition & 1 deletion beacon_node/http_api/tests/interactive_tests.rs
@@ -896,7 +896,7 @@ async fn queue_attestations_from_http() {
let fork_name = tester.harness.spec.fork_name_at_slot::<E>(attestation_slot);
let attestation_future = tokio::spawn(async move {
client
- .post_beacon_pool_attestations_v2(&attestations, fork_name)
+ .post_beacon_pool_attestations_v2(&attestations.iter().collect::<Vec<_>>(), fork_name)
.await
.expect("attestations should be processed successfully")
});
14 changes: 7 additions & 7 deletions beacon_node/http_api/tests/tests.rs
@@ -1704,7 +1704,7 @@ impl ApiTester {

pub async fn test_post_beacon_pool_attestations_valid_v1(mut self) -> Self {
self.client
- .post_beacon_pool_attestations_v1(self.attestations.as_slice())
+ .post_beacon_pool_attestations_v1(&self.attestations.iter().collect::<Vec<_>>())
.await
.unwrap();

@@ -1723,7 +1723,7 @@ impl ApiTester {
.map(|att| self.chain.spec.fork_name_at_slot::<E>(att.data().slot))
.unwrap();
self.client
- .post_beacon_pool_attestations_v2(self.attestations.as_slice(), fork_name)
+ .post_beacon_pool_attestations_v2(&self.attestations.iter().collect::<Vec<_>>(), fork_name)
.await
.unwrap();
assert!(
@@ -1747,7 +1747,7 @@ impl ApiTester {

let err = self
.client
- .post_beacon_pool_attestations_v1(attestations.as_slice())
+ .post_beacon_pool_attestations_v1(&attestations.iter().collect::<Vec<_>>())
.await
.unwrap_err();

@@ -1789,7 +1789,7 @@ impl ApiTester {

let err_v2 = self
.client
- .post_beacon_pool_attestations_v2(attestations.as_slice(), fork_name)
+ .post_beacon_pool_attestations_v2(&attestations.iter().collect::<Vec<_>>(), fork_name)
.await
.unwrap_err();

@@ -3815,7 +3815,7 @@ impl ApiTester {

// Attest to the current slot
self.client
- .post_beacon_pool_attestations_v1(self.attestations.as_slice())
+ .post_beacon_pool_attestations_v1(&self.attestations.iter().collect::<Vec<_>>())
.await
.unwrap();

@@ -5455,7 +5455,7 @@ impl ApiTester {

// Attest to the current slot
self.client
- .post_beacon_pool_attestations_v1(self.attestations.as_slice())
+ .post_beacon_pool_attestations_v1(&self.attestations.iter().collect::<Vec<_>>())
.await
.unwrap();

@@ -5511,7 +5511,7 @@ impl ApiTester {
let expected_attestation_len = self.attestations.len();

self.client
- .post_beacon_pool_attestations_v1(self.attestations.as_slice())
+ .post_beacon_pool_attestations_v1(&self.attestations.iter().collect::<Vec<_>>())
.await
.unwrap();

4 changes: 2 additions & 2 deletions common/eth2/src/lib.rs
@@ -1264,7 +1264,7 @@ impl BeaconNodeHttpClient {
/// `POST v1/beacon/pool/attestations`
pub async fn post_beacon_pool_attestations_v1<E: EthSpec>(
&self,
- attestations: &[Attestation<E>],
+ attestations: &[&Attestation<E>],
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;

@@ -1283,7 +1283,7 @@
/// `POST v2/beacon/pool/attestations`
pub async fn post_beacon_pool_attestations_v2<E: EthSpec>(
&self,
- attestations: &[Attestation<E>],
+ attestations: &[&Attestation<E>],
fork_name: ForkName,
) -> Result<(), Error> {
let mut path = self.eth_path(V2)?;
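Both endpoints now take a slice of references, &[&Attestation<E>], rather than a slice of owned attestations, which is why every call site above collects references first. A minimal usage sketch, assuming an owned Vec<Attestation<E>> named attestations, an initialized BeaconNodeHttpClient named client, and a fork_name already in scope:

// The borrowed signature lets callers batch attestations gathered from
// different owners without cloning each one into a contiguous Vec.
let refs: Vec<&Attestation<E>> = attestations.iter().collect();
client.post_beacon_pool_attestations_v1(&refs).await?;

// The v2 endpoint is the same call with the fork name threaded through.
client.post_beacon_pool_attestations_v2(&refs, fork_name).await?;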
48 changes: 47 additions & 1 deletion validator_client/slashing_protection/src/slashing_database.rs
@@ -11,7 +11,7 @@ use rusqlite::{params, OptionalExtension, Transaction, TransactionBehavior};
use std::fs::File;
use std::path::Path;
use std::time::Duration;
- use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKeyBytes, SignedRoot, Slot};
+ use types::{Attestation, AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKeyBytes, SignedRoot, Slot};

type Pool = r2d2::Pool<SqliteConnectionManager>;

@@ -599,6 +599,13 @@ impl SlashingDatabase {
Ok(safe)
}

// pub fn batch_check_and_insert_attestations(
// &self,
// attestations_to_check: Vec<(Attestation<E>, DutyAndProof)>,
// ) {

// }

/// Check an attestation for slash safety, and if it is safe, record it in the database.
///
/// The checking and inserting happen atomically and exclusively. We enforce exclusivity
@@ -641,6 +648,45 @@ impl SlashingDatabase {
Ok(safe)
}

// TODO(attn-slash): previously I had separated checking the signing root
// and inserting, but this doesn't seem necessary and adds a bit of unneeded
// complexity. Will review this before deleting.
// pub fn check_attestation_signing_root(
// &self,
// validator_pubkey: &PublicKeyBytes,
// attestation: &AttestationData,
// domain: Hash256,
// ) -> Result<Safe, NotSafe> {
// let attestation_signing_root = attestation.signing_root(domain).into();
// let mut conn = self.conn_pool.get()?;
// let txn = conn.transaction_with_behavior(TransactionBehavior::Deferred)?;
// self.check_attestation(
// &txn,
// validator_pubkey,
// attestation.source.epoch,
// attestation.target.epoch,
// attestation_signing_root,
// )
// }

// pub fn insert_attestation_signing_root(
// &self,
// validator_pubkey: &PublicKeyBytes,
// attestation: &AttestationData,
// domain: Hash256,
// ) -> Result<(), NotSafe> {
// let attestation_signing_root = attestation.signing_root(domain).into();
// let mut conn = self.conn_pool.get()?;
// let txn = conn.transaction_with_behavior(TransactionBehavior::Exclusive)?;
// self.insert_attestation(
// &txn,
// validator_pubkey,
// attestation.source.epoch,
// attestation.target.epoch,
// attestation_signing_root,
// )
// }

/// Transactional variant of `check_and_insert_attestation_signing_root`.
fn check_and_insert_attestation_signing_root_txn(
&self,
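For context on where the commented-out stubs are headed, here is a hedged sketch of a batched check-and-insert in the spirit of the "batch db txn" commit. The function name and batch shape are hypothetical; the helper calls and transaction setup mirror the commented-out code above:

/// Hypothetical sketch, not part of this diff: check a batch of attestations
/// for slash safety and record them in one exclusive transaction, so the
/// whole batch either commits or rolls back together.
pub fn batch_check_and_insert_attestation_signing_roots(
    &self,
    batch: &[(PublicKeyBytes, AttestationData, Hash256)], // (pubkey, data, domain)
) -> Result<Vec<Safe>, NotSafe> {
    let mut conn = self.conn_pool.get()?;
    // One exclusive transaction for the whole batch.
    let txn = conn.transaction_with_behavior(TransactionBehavior::Exclusive)?;
    let mut results = Vec::with_capacity(batch.len());
    for (pubkey, data, domain) in batch {
        let signing_root = data.signing_root(*domain);
        // Check first, then record, exactly as the single-attestation path does.
        let safe = self.check_attestation(
            &txn,
            pubkey,
            data.source.epoch,
            data.target.epoch,
            signing_root.into(),
        )?;
        self.insert_attestation(
            &txn,
            pubkey,
            data.source.epoch,
            data.target.epoch,
            signing_root.into(),
        )?;
        results.push(safe);
    }
    txn.commit()?;
    Ok(results)
}

Note that the ? on the first NotSafe rolls back the entire batch; whether one slashable attestation should abort the batch or merely be skipped is a policy decision the PR still has to settle.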
84 changes: 84 additions & 0 deletions validator_client/src/attestation_data_service.rs
@@ -0,0 +1,84 @@
use std::{collections::HashMap, sync::Arc};

use slot_clock::SlotClock;
use types::{AttestationData, CommitteeIndex, EthSpec, ForkName, Slot};

use crate::{
beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced},
http_metrics::metrics,
};

/// The AttestationDataService is responsible for downloading and caching attestation data at a
/// given slot for a range of committee indexes, and for avoiding re-downloads of identical
/// attestation data.
pub struct AttestationDataService<T: SlotClock, E: EthSpec> {
attestation_data_by_committee: HashMap<CommitteeIndex, AttestationData>,
Collaborator comment on this line:
Note that attestation votes are the same for every committee; the only difference is the index field, which equals this hashmap's key (CommitteeIndex). If you are refactoring the flow, you could consider an optimization that fetches a single AttestationData for all committees.

However, some DVT solutions rely on a call being made for each CommitteeIndex. I'm not sure if this is still the case, but it was ~1.5 years ago.

beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
}

impl<T: SlotClock, E: EthSpec> AttestationDataService<T, E> {
pub fn new(beacon_nodes: Arc<BeaconNodeFallback<T, E>>) -> Self {
Self {
attestation_data_by_committee: HashMap::new(),
beacon_nodes,
}
}

/// Get previously downloaded attestation data by a given committee index. If the Electra fork is
/// enabled the committee index is ignored, since attestation data is identical across committees.
pub fn get_data_by_committee_index(
&self,
committee_index: &CommitteeIndex,
fork_name: &ForkName,
) -> Option<AttestationData> {
if fork_name.electra_enabled() {
// Post-Electra, any cached entry for this slot will do.
self.attestation_data_by_committee.values().next().cloned()
} else {
self.attestation_data_by_committee
.get(committee_index)
.cloned()
}
}

/// Download attestation data for this slot/committee index from the beacon node.
pub async fn download_data(
&mut self,
committee_index: &CommitteeIndex,
slot: &Slot,
fork_name: &ForkName,
) -> Result<(), String> {
// If we've already downloaded data for this committee index, or Electra is
// enabled and we've already downloaded data for this slot, there's no need
// to re-download it.
if self.get_data_by_committee_index(committee_index, fork_name).is_some() {
return Ok(());
}

let attestation_data = self
.beacon_nodes
.first_success(
RequireSynced::No,
OfflineOnFailure::Yes,
|beacon_node| async move {
let _timer = metrics::start_timer_vec(
&metrics::ATTESTATION_SERVICE_TIMES,
&[metrics::ATTESTATIONS_HTTP_GET],
);
beacon_node
.get_validator_attestation_data(*slot, *committee_index)
.await
.map_err(|e| format!("Failed to produce attestation data: {:?}", e))
.map(|result| result.data)
},
)
.await
.map_err(|e| e.to_string())?;

self.attestation_data_by_committee
.insert(*committee_index, attestation_data);

Ok(())
}
}
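For orientation, a hedged sketch of how a per-slot attestation flow might drive this service; the duties list, its committee_index field, and the surrounding variables are assumptions for illustration, not code from this PR:

// Hypothetical per-slot flow: download at most once per committee index
// (and at most once per slot post-Electra, since the cache treats all
// committee indices as equivalent), then read back from the cache.
let mut data_service = AttestationDataService::new(beacon_nodes.clone());

for duty in &duties {
    // Returns early if a matching entry is already cached.
    data_service
        .download_data(&duty.committee_index, &slot, &fork_name)
        .await?;
}

for duty in &duties {
    if let Some(data) = data_service.get_data_by_committee_index(&duty.committee_index, &fork_name) {
        // Pre-Electra, set data.index to the duty's committee index where
        // required, then sign and hand off for publishing.
    }
}

This is also where the collaborator's point lands: post-Electra one downloaded AttestationData serves every committee, while pre-Electra each distinct CommitteeIndex still triggers its own request, which keeps the per-index call that some DVT setups expect.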