Commit d52be4c

isSerge, dnieh, i1i1, and nazar-pc authored
Include most recent changes into subspace-customization branch (#41)
* Disable nodes in memory and hide list and map views
* Add the node count to the header
* Limit nodes in map view to 1300
* Sort nodes only when receive new best block (#11)
* Enable nodes list (#13)
* Show the stats view when nodes are disabled
* Clean up Header Tile styles
* Add node count to Header's shouldComponentUpdate (#19)
* Make space pledged visible (#20)
* add space pledged tile to Header
* update npm packages
* update tsconfig
* remove babelrc, babel presets as well as stable package
* fix svg namespace tag syntax errors
* fix reference error due to namespace and class component having same name
* replace tslint with eslint
* make eslint happier
* update .nvmrc to 14
* update node version to 14 in gh workflow
* fix eslint warnings due to warnings treated as errors on CI (process.env.CI = true)
* pretty fix
* format space pledged value
* minor
* minor
* only display space pledged for gemini II
* minor
* Update map (#21)
* Map: render node locations instead of individual nodes
* render city details
* Merge branch 'telemetry-frontend-stress-test' of github.com:subspace/substrate-telemetry into map-update
* Add graceful degradation screen for list view to redirect to polkadot apps (#25)
* add DISABLE_NODE_LIST env var
* add message on checking balance
* update message
* Remove unnecessary fields (#26)
* Remove some unnecessary fields and fix deserialization
* hide validator and implementation columns
* minor
* Ignore broken test

Co-authored-by: i1i1 <vanyarybin1@live.ru>

* Deployment fixes (#27)
* Replace boolean value with number in docker-compose.yml
* bump docker to 14
* use createRoot instead of react-dom, remove service worker
* update browsers list in package.json
* Omit sending node data
* fix boolean to disable node list
* use window.process_env instead of process.env (#32)
* Adjust styles (#33)
* Omit more data related to node (#34)
* Move omiting data from env to cli
* Drain messages to locator if we skip node info
* Omit node data delta updates
* Add trait for feed message writer
* Skip serialization for node update
* Send updates every 10 seconds for top bar
* Supply period for updates from cli
* Update docker compose
* Show unique reward address count (#29)
* adjust styles for second row
* add tile for unique reward address count
* add uniq address count into state
* add tile for unique address count
* fetch uniq address from from subscan API
* setup minimal server
* fetch address count every 10s
* added cors related headers
* fetch uniq addr count from our backend instead of subscan
* hide uniq addr count component if value is not present
* adjust styles
* update header markup
* provide api url as env variable
* rename folder
* add service to docker-compose
* fetch metadata, remove polkadot.js api
* parse string values
* add dockerfile for metadata
* update Dockerfile
* update debug logs
* remove logs
* Reduce metadata queries (#37)
* Reduce metadata queries
* addressing PR comments
* retry if failed request
* Update frontend/src/App.tsx

Co-authored-by: Nazar Mokrynskyi <nazar@mokrynskyi.com>
Co-authored-by: Nazar Mokrynskyi <nazar@mokrynskyi.com>

* Add second local shard
* Use Math.log to calc dot size (#39)
* Use Math.log to calc dot size
* update comment
* pretty fix

Co-authored-by: dnieh <danielnieh@gmail.com>
Co-authored-by: i1i1 <vanyarybin1@live.ru>
Co-authored-by: Nazar Mokrynskyi <nazar@mokrynskyi.com>
1 parent 884bd3b commit d52be4c
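
The "Use Math.log to calc dot size (#39)" item above scales map dots with the logarithm of the node count at a location rather than linearly, so a city with hundreds of nodes does not swamp the map. The actual change is in the TypeScript map component and is not part of the diffs shown below; what follows is only a rough Rust sketch of that kind of scaling, with a hypothetical function name and made-up constants:

// Hypothetical sketch only: the real change lives in the TypeScript map code
// and may use different constants, clamping, and units.
fn dot_radius(node_count: u32) -> f64 {
    const MIN_RADIUS: f64 = 2.0; // assumed floor so a lone node stays visible
    const SCALE: f64 = 3.0; // assumed growth per e-fold increase in node count
    // ln(1) = 0, so a single node gets exactly MIN_RADIUS.
    MIN_RADIUS + SCALE * f64::from(node_count.max(1)).ln()
}

fn main() {
    for n in [1, 10, 100, 1300] {
        println!("{n:>5} nodes -> radius {:.1}", dot_radius(n));
    }
}

With these assumed constants a single node gets radius 2.0 and the 1300-node cap mentioned above gets roughly 23.5, which is the whole point of the logarithm: two orders of magnitude more nodes only grows the dot by a modest factor.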

File tree

103 files changed, +9917 -9470 lines changed


.github/workflows/frontend.yml

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ jobs:
 
     strategy:
      matrix:
-        node-version: [10.x, 12.x]
+        node-version: [14.x]
 
    steps:
    - uses: actions/checkout@v2

backend/common/src/node_message.rs

Lines changed: 0 additions & 14 deletions

@@ -60,7 +60,6 @@ pub enum Payload {
     BlockImport(Block),
     NotifyFinalized(Finalized),
     AfgAuthoritySet(AfgAuthoritySet),
-    HwBench(NodeHwBench),
 }
 
 #[derive(Serialize, Deserialize, Debug, Clone)]
@@ -94,14 +93,6 @@ pub struct AfgAuthoritySet {
     pub authority_set_id: Box<str>,
 }
 
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct NodeHwBench {
-    pub cpu_hashrate_score: u64,
-    pub memory_memcpy_score: u64,
-    pub disk_sequential_write_score: Option<u64>,
-    pub disk_random_write_score: Option<u64>,
-}
-
 impl Payload {
     pub fn best_block(&self) -> Option<&Block> {
         match self {
@@ -152,15 +143,10 @@ mod tests {
             node: NodeDetails {
                 chain: "foo".into(),
                 name: "foo".into(),
-                implementation: "foo".into(),
                 version: "foo".into(),
-                target_arch: Some("x86_64".into()),
-                target_os: Some("linux".into()),
-                target_env: Some("env".into()),
                 validator: None,
                 network_id: ArrayString::new(),
                 startup_time: None,
-                sysinfo: None,
             },
         }),
    });

backend/common/src/node_types.rs

Lines changed: 0 additions & 35 deletions

@@ -33,45 +33,10 @@ pub type NetworkId = ArrayString<64>;
 pub struct NodeDetails {
     pub chain: Box<str>,
     pub name: Box<str>,
-    pub implementation: Box<str>,
     pub version: Box<str>,
     pub validator: Option<Box<str>>,
     pub network_id: NetworkId,
     pub startup_time: Option<Box<str>>,
-    pub target_os: Option<Box<str>>,
-    pub target_arch: Option<Box<str>>,
-    pub target_env: Option<Box<str>>,
-    pub sysinfo: Option<NodeSysInfo>,
-}
-
-/// Hardware and software information for the node.
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct NodeSysInfo {
-    /// The exact CPU model.
-    pub cpu: Option<Box<str>>,
-    /// The total amount of memory, in bytes.
-    pub memory: Option<u64>,
-    /// The number of physical CPU cores.
-    pub core_count: Option<u32>,
-    /// The Linux kernel version.
-    pub linux_kernel: Option<Box<str>>,
-    /// The exact Linux distribution used.
-    pub linux_distro: Option<Box<str>>,
-    /// Whether the node's running under a virtual machine.
-    pub is_virtual_machine: Option<bool>,
-}
-
-/// Hardware benchmark results for the node.
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct NodeHwBench {
-    /// The CPU speed, as measured in how many MB/s it can hash using the BLAKE2b-256 hash.
-    pub cpu_hashrate_score: u64,
-    /// Memory bandwidth in MB/s, calculated by measuring the throughput of `memcpy`.
-    pub memory_memcpy_score: u64,
-    /// Sequential disk write speed in MB/s.
-    pub disk_sequential_write_score: Option<u64>,
-    /// Random disk write speed in MB/s.
-    pub disk_random_write_score: Option<u64>,
 }
 
 /// A couple of node statistics.
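
For reference, after these deletions NodeDetails reduces to the fields visible in the surrounding context lines. A minimal sketch of the resulting shape (the real file keeps its serde derives, doc comments, and the rest of the module; the arrayvec import is an assumption based on the NetworkId alias in the hunk header):

use arrayvec::ArrayString; // assumed; the hunk header shows `pub type NetworkId = ArrayString<64>;`

pub type NetworkId = ArrayString<64>;

// NodeDetails as it looks after the removals above (sketch only).
pub struct NodeDetails {
    pub chain: Box<str>,
    pub name: Box<str>,
    pub version: Box<str>,
    pub validator: Option<Box<str>>,
    pub network_id: NetworkId,
    pub startup_time: Option<Box<str>>,
}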

backend/telemetry_core/src/aggregator/aggregator.rs

Lines changed: 66 additions & 21 deletions

@@ -22,6 +22,7 @@ use futures::{future, Sink, SinkExt};
 use std::net::IpAddr;
 use std::sync::atomic::AtomicU64;
 use std::sync::Arc;
+use std::time::Duration;
 
 id_type! {
     /// A unique Id is assigned per websocket connection (or more accurately,
@@ -44,6 +45,8 @@ pub struct AggregatorOpts {
     /// How many nodes from third party chains are allowed to connect
     /// before we prevent connections from them.
     pub max_third_party_nodes: usize,
+    /// Send updates periodically
+    pub update_every: Option<Duration>,
 }
 
 struct AggregatorInternal {
@@ -61,25 +64,63 @@ struct AggregatorInternal {
 
 impl Aggregator {
     /// Spawn a new Aggregator. This connects to the telemetry backend
-    pub async fn spawn(opts: AggregatorOpts) -> anyhow::Result<Aggregator> {
+    pub async fn spawn(
+        AggregatorOpts {
+            denylist,
+            max_queue_len,
+            max_third_party_nodes,
+            update_every,
+        }: AggregatorOpts,
+    ) -> anyhow::Result<Aggregator> {
         let (tx_to_aggregator, rx_from_external) = flume::unbounded();
 
-        // Kick off a locator task to locate nodes, which hands back a channel to make location requests
-        let tx_to_locator =
-            find_location(tx_to_aggregator.clone().into_sink().with(|(node_id, msg)| {
-                future::ok::<_, flume::SendError<_>>(inner_loop::ToAggregator::FromFindLocation(
-                    node_id, msg,
-                ))
-            }));
-
-        // Handle any incoming messages in our handler loop:
-        tokio::spawn(Aggregator::handle_messages(
-            rx_from_external,
-            tx_to_locator,
-            opts.max_queue_len,
-            opts.denylist,
-            opts.max_third_party_nodes,
-        ));
+        match update_every {
+            None => {
+                // Kick off a locator task to locate nodes, which hands back a channel to make location requests
+                let tx_to_locator =
+                    find_location(tx_to_aggregator.clone().into_sink().with(|(node_id, msg)| {
+                        future::ok::<_, flume::SendError<_>>(
+                            inner_loop::ToAggregator::FromFindLocation(node_id, msg),
+                        )
+                    }));
+
+                // Handle any incoming messages in our handler loop:
+                tokio::spawn(Aggregator::handle_messages(
+                    rx_from_external,
+                    tx_to_locator.into_sink(),
+                    max_queue_len,
+                    denylist,
+                    max_third_party_nodes,
+                    true,
+                ));
+            }
+            Some(update_every) => {
+                tokio::task::spawn({
+                    let tx_to_aggregator = tx_to_aggregator.clone();
+                    let mut timer = tokio::time::interval(update_every);
+                    // First tick is instant
+                    timer.tick().await;
+
+                    async move {
+                        while let Ok(()) =
+                            tx_to_aggregator.send(inner_loop::ToAggregator::SendUpdates)
+                        {
+                            timer.tick().await;
+                        }
+                    }
+                });
+
+                // Handle any incoming messages in our handler loop:
+                tokio::spawn(Aggregator::handle_messages(
+                    rx_from_external,
+                    futures::sink::drain(),
+                    max_queue_len,
+                    denylist,
+                    max_third_party_nodes,
+                    false,
+                ));
+            }
+        }
 
         // Return a handle to our aggregator:
         Ok(Aggregator(Arc::new(AggregatorInternal {
@@ -92,20 +133,24 @@ impl Aggregator {
     /// This is spawned into a separate task and handles any messages coming
     /// in to the aggregator. If nobody is holding the tx side of the channel
     /// any more, this task will gracefully end.
-    async fn handle_messages(
+    async fn handle_messages<A>(
         rx_from_external: flume::Receiver<inner_loop::ToAggregator>,
-        tx_to_aggregator: flume::Sender<(NodeId, IpAddr)>,
+        tx_to_aggregator: A,
         max_queue_len: usize,
         denylist: Vec<String>,
         max_third_party_nodes: usize,
-    ) {
+        send_node_data: bool,
+    ) where
+        A: Sink<(NodeId, IpAddr)> + Send + Unpin + 'static,
+    {
         inner_loop::InnerLoop::new(
             tx_to_aggregator,
             denylist,
             max_queue_len,
             max_third_party_nodes,
+            send_node_data,
         )
-        .handle(rx_from_external)
+        .handle(rx_from_external.into_stream())
        .await;
    }
 
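
The interesting piece above is the optional update timer: when update_every is supplied from the CLI, a task ticks on a tokio interval and pushes ToAggregator::SendUpdates into the aggregator's flume channel until the receiving side goes away, while per-node data is dropped (the locator sink is replaced with futures::sink::drain() and send_node_data is false). Below is a minimal, self-contained sketch of that timer pattern, with a stand-in message type and a shortened period (the commit wires 10 seconds in from the CLI):

use std::time::Duration;

// Stand-in for inner_loop::ToAggregator; only the variant the timer needs.
#[derive(Debug)]
enum ToAggregator {
    SendUpdates,
}

#[tokio::main]
async fn main() {
    let (tx_to_aggregator, rx_from_external) = flume::unbounded::<ToAggregator>();
    // The real period comes from the CLI (`update_every`); shortened here so
    // the demo finishes quickly.
    let update_every = Duration::from_secs(1);

    let mut timer = tokio::time::interval(update_every);
    // The first tick completes immediately, so consume it up front.
    timer.tick().await;

    tokio::spawn(async move {
        // flume's send() is synchronous and returns Err once the receiver is
        // dropped, which ends this loop and the task with it.
        while let Ok(()) = tx_to_aggregator.send(ToAggregator::SendUpdates) {
            timer.tick().await;
        }
    });

    // Stand-in for the aggregator inner loop: handle a few updates, then stop.
    for _ in 0..3 {
        if let Ok(msg) = rx_from_external.recv_async().await {
            println!("aggregator got {msg:?}");
        }
    }
    // `rx_from_external` is dropped here, stopping the timer task.
}

Because flume's send() fails as soon as the receiver is gone, the timer task needs no explicit shutdown signal; the aggregator relies on the same property for its graceful end.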

0 commit comments