
Commit a2579d4

remove raptor coding experiments (pyth-network#255)
1 parent 5591db7 commit a2579d4

File tree

Cargo.lock
core/Cargo.toml
core/benches/shredder.rs

3 files changed: 2 additions & 64 deletions

Cargo.lock

Lines changed: 0 additions & 7 deletions
Some generated files are not rendered by default.

core/Cargo.toml

Lines changed: 0 additions & 1 deletion
@@ -85,7 +85,6 @@ trees = { workspace = true }
 [dev-dependencies]
 assert_matches = { workspace = true }
 fs_extra = { workspace = true }
-raptorq = { workspace = true }
 serde_json = { workspace = true }
 serial_test = { workspace = true }
 # See order-crates-for-publishing.py for using this unusual `path = "."`

core/benches/shredder.rs

Lines changed: 2 additions & 56 deletions
@@ -4,26 +4,17 @@
 extern crate test;
 
 use {
-    rand::{seq::SliceRandom, Rng},
-    raptorq::{Decoder, Encoder},
+    rand::Rng,
     solana_entry::entry::{create_ticks, Entry},
     solana_ledger::shred::{
         max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, ReedSolomonCache,
         Shred, ShredFlags, Shredder, DATA_SHREDS_PER_FEC_BLOCK, LEGACY_SHRED_DATA_CAPACITY,
     },
     solana_perf::test_tx,
-    solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
+    solana_sdk::{hash::Hash, signature::Keypair},
     test::Bencher,
 };
 
-// Copied these values here to avoid exposing shreds
-// internals only for the sake of benchmarks.
-
-// size of nonce: 4
-// size of common shred header: 83
-// size of coding shred header: 6
-const VALID_SHRED_DATA_LEN: usize = PACKET_DATA_SIZE - 4 - 83 - 6;
-
 fn make_test_entry(txs_per_entry: u64) -> Entry {
     Entry {
         num_hashes: 100_000,
@@ -61,17 +52,6 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
     data_shreds
 }
 
-fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
-    let data_shreds = make_shreds(num_shreds);
-    let mut data: Vec<u8> = vec![0; num_shreds * VALID_SHRED_DATA_LEN];
-    for (i, shred) in (data_shreds[0..num_shreds]).iter().enumerate() {
-        data[i * VALID_SHRED_DATA_LEN..(i + 1) * VALID_SHRED_DATA_LEN]
-            .copy_from_slice(&shred.payload()[..VALID_SHRED_DATA_LEN]);
-    }
-
-    data
-}
-
 #[bench]
 fn bench_shredder_ticks(bencher: &mut Bencher) {
     let kp = Keypair::new();
@@ -197,37 +177,3 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
         Shredder::try_recovery(coding_shreds[..].to_vec(), &reed_solomon_cache).unwrap();
     })
 }
-
-#[bench]
-fn bench_shredder_coding_raptorq(bencher: &mut Bencher) {
-    let symbol_count = DATA_SHREDS_PER_FEC_BLOCK;
-    let data = make_concatenated_shreds(symbol_count);
-    bencher.iter(|| {
-        let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
-        encoder.get_encoded_packets(symbol_count as u32);
-    })
-}
-
-#[bench]
-fn bench_shredder_decoding_raptorq(bencher: &mut Bencher) {
-    let symbol_count = DATA_SHREDS_PER_FEC_BLOCK;
-    let data = make_concatenated_shreds(symbol_count);
-    let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
-    let mut packets = encoder.get_encoded_packets(symbol_count as u32);
-    packets.shuffle(&mut rand::thread_rng());
-
-    // Here we simulate losing 1 less than 50% of the packets randomly
-    packets.truncate(packets.len() - packets.len() / 2 + 1);
-
-    bencher.iter(|| {
-        let mut decoder = Decoder::new(encoder.get_config());
-        let mut result = None;
-        for packet in &packets {
-            result = decoder.decode(packet.clone());
-            if result.is_some() {
-                break;
-            }
-        }
-        assert_eq!(result.unwrap(), data);
-    })
-}
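
For context, the removed benchmarks exercised the raptorq crate's fountain-code round trip: Encoder::with_defaults splits a byte buffer into fixed-size symbols, get_encoded_packets emits the source packets plus a requested number of repair packets, and a Decoder rebuilds the original buffer from whichever packets survive. A minimal standalone sketch of that flow, using only the raptorq calls visible in the deleted code (the buffer size, symbol size, repair count, and amount of simulated loss below are illustrative, not the benchmark's values):

// Minimal RaptorQ encode/decode round trip, mirroring the calls the
// removed benchmarks made. All sizes here are illustrative.
fn main() {
    let data: Vec<u8> = (0u8..=255).cycle().take(10_000).collect();

    // Split `data` into symbols of at most 1024 bytes each.
    let encoder = raptorq::Encoder::with_defaults(&data, 1024);

    // Source packets plus 15 repair packets per block.
    let mut packets = encoder.get_encoded_packets(15);

    // Simulate loss: drop a few packets. Decoding still succeeds as
    // long as roughly one packet per source symbol survives.
    packets.truncate(packets.len() - 5);

    // Feed packets to the decoder until it returns the original data.
    let mut decoder = raptorq::Decoder::new(encoder.get_config());
    let mut result = None;
    for packet in packets {
        result = decoder.decode(packet);
        if result.is_some() {
            break;
        }
    }
    assert_eq!(result.unwrap(), data);
}

The deleted bench_shredder_decoding_raptorq measured essentially this loop, but over concatenated shred payloads and with roughly half of the packets discarded.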
