|
| 1 | +/* tag::catalog[] |
| 2 | +Title:: Boundary nodes integration test |
| 3 | +
|
| 4 | +Goal:: Test whether the boundary node handles raw and non-raw traffic as expected. |
| 5 | +
|
| 6 | +Runbook:: |
| 7 | +. Setup: |
| 8 | + . A running BN VM. |
| 9 | + . A subnet with 1 HTTP canister and 1 non-HTTP canister, both counters. |
| 10 | +. Call into the non-HTTP canister, expecting the counter to increment. |
| 11 | +. Call into the HTTP canister, expecting the counter to increment. |
| 12 | +. Update the denylist to block the HTTP canister. |
| 13 | +. Call into the HTTP canister again, but expecting a 451. |
| 14 | +
|
| 15 | +Success:: |
| 16 | +. The calls succeed with the expected values. |
| 17 | +end::catalog[] */ |
| 18 | + |
| 19 | +use crate::driver::{ |
| 20 | + api_boundary_node::{ApiBoundaryNode, ApiBoundaryNodeVm}, |
| 21 | + ic::{InternetComputer, Subnet}, |
| 22 | + test_env::TestEnv, |
| 23 | + test_env_api::{ |
| 24 | + retry_async, HasPublicApiUrl, HasTopologySnapshot, IcNodeContainer, NnsInstallationExt, |
| 25 | + RetrieveIpv4Addr, SshSession, READY_WAIT_TIMEOUT, RETRY_BACKOFF, |
| 26 | + }, |
| 27 | +}; |
| 28 | + |
| 29 | +use std::{convert::TryFrom, io::Read, time::Duration}; |
| 30 | + |
| 31 | +use anyhow::{Context, Error}; |
| 32 | +use ic_interfaces_registry::RegistryValue; |
| 33 | +use ic_protobuf::registry::routing_table::v1::RoutingTable as PbRoutingTable; |
| 34 | +use ic_registry_keys::make_routing_table_record_key; |
| 35 | +use ic_registry_nns_data_provider::registry::RegistryCanister; |
| 36 | +use ic_registry_routing_table::RoutingTable; |
| 37 | +use ic_registry_subnet_type::SubnetType; |
| 38 | +use slog::info; |
| 39 | + |
/// Name under which the API boundary node VM is registered and later looked up
/// in the test environment (see the `get_deployed_api_boundary_node` calls below).
const API_BOUNDARY_NODE_NAME: &str = "boundary-node-1";
| 41 | + |
/// Guard that collects API boundary node diagnostics when dropped while still
/// enabled (see the `Drop` impl below). Tests call `disable()` as their last
/// step, so the diagnostics only run when the test exits early — typically
/// because it panicked.
struct PanicHandler {
    // Test environment used to locate the deployed boundary node on drop.
    env: TestEnv,
    // Armed by default; `disable()` clears this to make drop a no-op.
    is_enabled: bool,
}
| 46 | + |
| 47 | +impl PanicHandler { |
| 48 | + fn new(env: TestEnv) -> Self { |
| 49 | + Self { |
| 50 | + env, |
| 51 | + is_enabled: true, |
| 52 | + } |
| 53 | + } |
| 54 | + |
| 55 | + fn disable(&mut self) { |
| 56 | + self.is_enabled = false; |
| 57 | + } |
| 58 | +} |
| 59 | + |
| 60 | +impl Drop for PanicHandler { |
| 61 | + fn drop(&mut self) { |
| 62 | + if !self.is_enabled { |
| 63 | + return; |
| 64 | + } |
| 65 | + |
| 66 | + std::thread::sleep(Duration::from_secs(60)); |
| 67 | + |
| 68 | + let logger = self.env.logger(); |
| 69 | + |
| 70 | + let boundary_node = self |
| 71 | + .env |
| 72 | + .get_deployed_api_boundary_node(API_BOUNDARY_NODE_NAME) |
| 73 | + .unwrap() |
| 74 | + .get_snapshot() |
| 75 | + .unwrap(); |
| 76 | + |
| 77 | + let (list_dependencies, exit_status) = exec_ssh_command( |
| 78 | + &boundary_node, |
| 79 | + "systemctl list-dependencies systemd-sysusers.service --all --reverse --no-pager", |
| 80 | + ) |
| 81 | + .unwrap(); |
| 82 | + |
| 83 | + info!( |
| 84 | + logger, |
| 85 | + "systemctl {API_BOUNDARY_NODE_NAME} = '{list_dependencies}'. Exit status = {}", |
| 86 | + exit_status, |
| 87 | + ); |
| 88 | + } |
| 89 | +} |
| 90 | + |
| 91 | +fn exec_ssh_command(vm: &dyn SshSession, command: &str) -> Result<(String, i32), Error> { |
| 92 | + let mut channel = vm.block_on_ssh_session()?.channel_session()?; |
| 93 | + |
| 94 | + channel.exec(command)?; |
| 95 | + |
| 96 | + let mut output = String::new(); |
| 97 | + channel.read_to_string(&mut output)?; |
| 98 | + channel.wait_close()?; |
| 99 | + |
| 100 | + Ok((output, channel.exit_status()?)) |
| 101 | +} |
| 102 | + |
/// How TLS certificates and DNS records are provisioned for the API boundary
/// node under test.
// `Debug` lets tests log the chosen configuration; `PartialEq`/`Eq` let them
// compare it. All three are purely additive and backward-compatible.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ApiBoundaryNodeHttpsConfig {
    /// Acquire a playnet certificate (or fail if all have been acquired already)
    /// for the domain `ic{ix}.farm.dfinity.systems`
    /// where `ix` is the index of the acquired playnet.
    ///
    /// Then create an AAAA record pointing
    /// `ic{ix}.farm.dfinity.systems` to the IPv6 address of the BN.
    ///
    /// Also add CNAME records for
    /// `*.ic{ix}.farm.dfinity.systems` and
    /// `*.raw.ic{ix}.farm.dfinity.systems`
    /// pointing to `ic{ix}.farm.dfinity.systems`.
    ///
    /// If IPv4 has been enabled for the BN (`has_ipv4`),
    /// also add a corresponding A record pointing to the IPv4 address of the BN.
    ///
    /// Finally configure the BN with the playnet certificate.
    ///
    /// Note that if multiple BNs are created within the same
    /// farm-group, they will share the same certificate and
    /// domain name.
    /// Also all their IPv6 addresses will be added to the AAAA record
    /// and all their IPv4 addresses will be added to the A record.
    UseRealCertsAndDns,

    /// Don't create real certificates and DNS records,
    /// instead dangerously accept self-signed certificates and
    /// resolve domains on the client-side without querying DNS.
    AcceptInvalidCertsAndResolveClientSide,
}
| 134 | + |
| 135 | +pub fn mk_setup(api_bn_https_config: ApiBoundaryNodeHttpsConfig) -> impl Fn(TestEnv) { |
| 136 | + move |env: TestEnv| { |
| 137 | + setup(api_bn_https_config, env); |
| 138 | + } |
| 139 | +} |
| 140 | + |
/// Brings up the system under test: a single-node System subnet with the NNS
/// canisters installed, plus one API boundary node VM configured according to
/// `api_bn_https_config`. Blocks until the replicas, the registry routing
/// table, the boundary node's routes file, and the boundary node's health
/// endpoint are all ready; panics (failing the test) if any step fails.
fn setup(api_bn_https_config: ApiBoundaryNodeHttpsConfig, env: TestEnv) {
    let logger = env.logger();

    // A single System subnet with one node is sufficient to host the NNS.
    InternetComputer::new()
        .add_subnet(Subnet::new(SubnetType::System).add_nodes(1))
        .setup_and_start(&env)
        .expect("failed to setup IC under test");

    // Install the NNS canisters on the (only) node of the root subnet.
    env.topology_snapshot()
        .root_subnet()
        .nodes()
        .next()
        .unwrap()
        .install_nns_canisters()
        .expect("Could not install NNS canisters");

    // Allocate the API BN VM and apply the requested HTTPS configuration
    // before starting it.
    let api_bn = ApiBoundaryNode::new(String::from(API_BOUNDARY_NODE_NAME))
        .allocate_vm(&env)
        .unwrap()
        .for_ic(&env, "");
    let api_bn = match api_bn_https_config {
        ApiBoundaryNodeHttpsConfig::UseRealCertsAndDns => api_bn.use_real_certs_and_dns(),
        ApiBoundaryNodeHttpsConfig::AcceptInvalidCertsAndResolveClientSide => api_bn,
    };
    api_bn
        .start(&env)
        .expect("failed to setup ApiBoundaryNode VM");

    // Await Replicas
    info!(&logger, "Checking readiness of all replica nodes...");
    for subnet in env.topology_snapshot().subnets() {
        for node in subnet.nodes() {
            node.await_status_is_healthy()
                .expect("Replica did not come up healthy.");
        }
    }

    let rt = tokio::runtime::Runtime::new().expect("Could not create tokio runtime.");

    // Retry until the routing table can be fetched, decoded from protobuf, and
    // converted; as the expect message says, failures here indicate a broken
    // test environment rather than a boundary node defect.
    info!(&logger, "Polling registry");
    let registry = RegistryCanister::new(api_bn.nns_node_urls);
    let (latest, routes) = rt.block_on(retry_async(&logger, READY_WAIT_TIMEOUT, RETRY_BACKOFF, || async {
        let (bytes, latest) = registry.get_value(make_routing_table_record_key().into(), None).await
            .context("Failed to `get_value` from registry")?;
        let routes = PbRoutingTable::decode(bytes.as_slice())
            .context("Failed to decode registry routes")?;
        let routes = RoutingTable::try_from(routes)
            .context("Failed to convert registry routes")?;
        Ok((latest, routes))
    }))
    .expect("Failed to poll registry. This is not a Boundary Node error. It is a test environment issue.");
    info!(&logger, "Latest registry {latest}: {routes:?}");

    // Await Boundary Node
    let api_boundary_node = env
        .get_deployed_api_boundary_node(API_BOUNDARY_NODE_NAME)
        .unwrap()
        .get_snapshot()
        .unwrap();

    info!(
        &logger,
        "API Boundary node {API_BOUNDARY_NODE_NAME} has IPv6 {:?}",
        api_boundary_node.ipv6()
    );
    info!(
        &logger,
        "API Boundary node {API_BOUNDARY_NODE_NAME} has IPv4 {:?}",
        api_boundary_node.block_on_ipv4().unwrap()
    );

    // Block (via a remote shell loop) until the '// PLACEHOLDER' marker is gone
    // from the routes file — presumably it is replaced once real routes have
    // been written; TODO confirm against the BN image.
    info!(&logger, "Waiting for routes file");
    let routes_path = "/var/opt/nginx/ic/ic_routes.js";
    let sleep_command = format!("while grep -q '// PLACEHOLDER' {routes_path}; do sleep 5; done");
    let (cmd_output, exit_status) = exec_ssh_command(&api_boundary_node, &sleep_command).unwrap();
    info!(
        logger,
        "{API_BOUNDARY_NODE_NAME} ran `{sleep_command}`: '{}'. Exit status = {exit_status}",
        cmd_output.trim(),
    );

    info!(&logger, "Checking API BN health");
    api_boundary_node
        .await_status_is_healthy()
        .expect("Boundary node did not come up healthy.");
}
| 227 | + |
| 228 | +/* tag::catalog[] |
| 229 | +Title:: API BN no-op test |
| 230 | +
|
| 231 | +Goal:: None |
| 232 | +
|
| 233 | +Runbook:: |
| 234 | +. N/A |
| 235 | +
|
| 236 | +Success:: Solar flares don't cause this test to crash |
| 237 | +
|
| 238 | +Coverage:: 1+1 still equals 2 |
| 239 | +
|
| 240 | +end::catalog[] */ |
| 241 | + |
| 242 | +pub fn noop_test(env: TestEnv) { |
| 243 | + let logger = env.logger(); |
| 244 | + |
| 245 | + let mut panic_handler = PanicHandler::new(env.clone()); |
| 246 | + |
| 247 | + let _api_boundary_node = env |
| 248 | + .get_deployed_api_boundary_node(API_BOUNDARY_NODE_NAME) |
| 249 | + .unwrap() |
| 250 | + .get_snapshot() |
| 251 | + .unwrap(); |
| 252 | + |
| 253 | + let rt = tokio::runtime::Runtime::new().expect("Could not create tokio runtime."); |
| 254 | + |
| 255 | + rt.block_on(async move { |
| 256 | + info!(&logger, "Nothing..."); |
| 257 | + }); |
| 258 | + |
| 259 | + panic_handler.disable(); |
| 260 | +} |
0 commit comments