```diff
@@ -4,26 +4,17 @@
 extern crate test;
 
 use {
-    rand::{seq::SliceRandom, Rng},
-    raptorq::{Decoder, Encoder},
+    rand::Rng,
     solana_entry::entry::{create_ticks, Entry},
     solana_ledger::shred::{
         max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, ReedSolomonCache,
         Shred, ShredFlags, Shredder, DATA_SHREDS_PER_FEC_BLOCK, LEGACY_SHRED_DATA_CAPACITY,
     },
     solana_perf::test_tx,
-    solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
+    solana_sdk::{hash::Hash, signature::Keypair},
     test::Bencher,
 };
 
-// Copied these values here to avoid exposing shreds
-// internals only for the sake of benchmarks.
-
-// size of nonce: 4
-// size of common shred header: 83
-// size of coding shred header: 6
-const VALID_SHRED_DATA_LEN: usize = PACKET_DATA_SIZE - 4 - 83 - 6;
-
 fn make_test_entry(txs_per_entry: u64) -> Entry {
     Entry {
         num_hashes: 100_000,
```
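The constant deleted above folded the shred header sizes into the raptorq symbol length. A minimal standalone sketch of that arithmetic, assuming `solana_sdk`'s `PACKET_DATA_SIZE` of 1232 bytes (1280-byte IPv6 MTU minus 40-byte IPv6 header minus 8-byte fragment header, a value from the SDK that is not shown in this diff); the named size constants are stand-ins for the numbers in the removed comment:

```rust
// Sketch only: spells out the arithmetic behind the removed constant.
// PACKET_DATA_SIZE mirrors solana_sdk (1280 - 40 - 8 = 1232); the other
// values come from the removed comment (nonce, common and coding headers).
const PACKET_DATA_SIZE: usize = 1232;
const SIZE_OF_NONCE: usize = 4;
const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
const SIZE_OF_CODING_SHRED_HEADER: usize = 6;

const VALID_SHRED_DATA_LEN: usize = PACKET_DATA_SIZE
    - SIZE_OF_NONCE
    - SIZE_OF_COMMON_SHRED_HEADER
    - SIZE_OF_CODING_SHRED_HEADER;

fn main() {
    // 1232 - 4 - 83 - 6 = 1139 bytes per raptorq symbol in the old benches.
    assert_eq!(VALID_SHRED_DATA_LEN, 1139);
}
```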
```diff
@@ -61,17 +52,6 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
     data_shreds
 }
 
-fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
-    let data_shreds = make_shreds(num_shreds);
-    let mut data: Vec<u8> = vec![0; num_shreds * VALID_SHRED_DATA_LEN];
-    for (i, shred) in (data_shreds[0..num_shreds]).iter().enumerate() {
-        data[i * VALID_SHRED_DATA_LEN..(i + 1) * VALID_SHRED_DATA_LEN]
-            .copy_from_slice(&shred.payload()[..VALID_SHRED_DATA_LEN]);
-    }
-
-    data
-}
-
 #[bench]
 fn bench_shredder_ticks(bencher: &mut Bencher) {
     let kp = Keypair::new();
```
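The removed `make_concatenated_shreds` helper copied a fixed-size prefix of each shred payload into one contiguous buffer, the flat equal-sized-symbol layout the raptorq encoder consumes. A self-contained sketch of that packing pattern, with `CHUNK_LEN` and the dummy payloads as illustrative stand-ins rather than Solana code:

```rust
// Illustrative sketch of the packing done by the removed helper: copy a
// fixed-size prefix of each payload into one flat buffer of equal-sized
// chunks. CHUNK_LEN and the dummy payloads are assumptions, not Solana code.
const CHUNK_LEN: usize = 1139;

fn concat_fixed_prefixes(payloads: &[Vec<u8>]) -> Vec<u8> {
    let mut data = vec![0u8; payloads.len() * CHUNK_LEN];
    for (i, payload) in payloads.iter().enumerate() {
        data[i * CHUNK_LEN..(i + 1) * CHUNK_LEN].copy_from_slice(&payload[..CHUNK_LEN]);
    }
    data
}

fn main() {
    let payloads = vec![vec![0xAA_u8; CHUNK_LEN]; 32]; // stand-ins for shred payloads
    let data = concat_fixed_prefixes(&payloads);
    assert_eq!(data.len(), 32 * CHUNK_LEN);
}
```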
```diff
@@ -197,37 +177,3 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
         Shredder::try_recovery(coding_shreds[..].to_vec(), &reed_solomon_cache).unwrap();
     })
 }
-
-#[bench]
-fn bench_shredder_coding_raptorq(bencher: &mut Bencher) {
-    let symbol_count = DATA_SHREDS_PER_FEC_BLOCK;
-    let data = make_concatenated_shreds(symbol_count);
-    bencher.iter(|| {
-        let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
-        encoder.get_encoded_packets(symbol_count as u32);
-    })
-}
-
-#[bench]
-fn bench_shredder_decoding_raptorq(bencher: &mut Bencher) {
-    let symbol_count = DATA_SHREDS_PER_FEC_BLOCK;
-    let data = make_concatenated_shreds(symbol_count);
-    let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
-    let mut packets = encoder.get_encoded_packets(symbol_count as u32);
-    packets.shuffle(&mut rand::thread_rng());
-
-    // Here we simulate losing 1 less than 50% of the packets randomly
-    packets.truncate(packets.len() - packets.len() / 2 + 1);
-
-    bencher.iter(|| {
-        let mut decoder = Decoder::new(encoder.get_config());
-        let mut result = None;
-        for packet in &packets {
-            result = decoder.decode(packet.clone());
-            if result.is_some() {
-                break;
-            }
-        }
-        assert_eq!(result.unwrap(), data);
-    })
-}
```