diff --git a/Cargo.toml b/Cargo.toml
index e5470c8d..33ee922f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,7 +15,7 @@ members = [
 [workspace.package]
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2021"
-version = "0.2.7"
+version = "0.2.9"
 rust-version = "1.70.0"
 license = "Apache-2.0 OR GPL-3.0"
 repository = "https://github.com/paritytech/zombienet-sdk"
@@ -59,9 +59,9 @@ tracing-subscriber = { version = "0.3" }
 glob-match = "0.2.1"
 
 # Zombienet workspace crates:
-support = { package = "zombienet-support", version = "0.2.7", path = "crates/support" }
-configuration = { package = "zombienet-configuration", version = "0.2.7", path = "crates/configuration" }
-orchestrator = { package = "zombienet-orchestrator", version = "0.2.7", path = "crates/orchestrator" }
-provider = { package = "zombienet-provider", version = "0.2.7", path = "crates/provider" }
-prom-metrics-parser = { package = "zombienet-prom-metrics-parser", version = "0.2.7", path = "crates/prom-metrics-parser" }
-zombienet-sdk = { version = "0.2.7", path = "crates/sdk" }
+support = { package = "zombienet-support", version = "0.2.9", path = "crates/support" }
+configuration = { package = "zombienet-configuration", version = "0.2.9", path = "crates/configuration" }
+orchestrator = { package = "zombienet-orchestrator", version = "0.2.9", path = "crates/orchestrator" }
+provider = { package = "zombienet-provider", version = "0.2.9", path = "crates/provider" }
+prom-metrics-parser = { package = "zombienet-prom-metrics-parser", version = "0.2.9", path = "crates/prom-metrics-parser" }
+zombienet-sdk = { version = "0.2.9", path = "crates/sdk" }
diff --git a/crates/orchestrator/src/generators/chain_spec.rs b/crates/orchestrator/src/generators/chain_spec.rs
index e93c0161..b3548a13 100644
--- a/crates/orchestrator/src/generators/chain_spec.rs
+++ b/crates/orchestrator/src/generators/chain_spec.rs
@@ -175,7 +175,6 @@ impl ChainSpec {
         let sanitized_cmd = if replacement_value.is_empty() {
             // we need to remove the `--chain` flag
             self.command.as_ref().unwrap().cmd().replace("--chain", "")
-            //.as_ref().unwrap().replace("--chain", "")
         } else {
             self.command.as_ref().unwrap().cmd().to_owned()
         };
@@ -881,7 +880,7 @@ fn add_balances(
     }
 }
 
-fn get_node_keys(node: &NodeSpec, use_stash: bool) -> GenesisNodeKey {
+fn get_node_keys(node: &NodeSpec, use_stash: bool, asset_hub_polkadot: bool) -> GenesisNodeKey {
     let sr_account = node.accounts.accounts.get("sr").unwrap();
     let sr_stash = node.accounts.accounts.get("sr_stash").unwrap();
     let ed_account = node.accounts.accounts.get("ed").unwrap();
@@ -898,6 +897,10 @@
         "nimbus",
         "vrf",
     ] {
+        if k == "aura" && asset_hub_polkadot {
+            keys.insert(k.to_string(), ed_account.address.clone());
+            continue;
+        }
         keys.insert(k.to_string(), sr_account.address.clone());
     }
 
@@ -917,10 +920,15 @@ fn add_authorities(
     nodes: &[&NodeSpec],
     use_stash: bool,
 ) {
+    let asset_hub_polkadot = chain_spec_json
+        .get("id")
+        .and_then(|v| v.as_str())
+        .map(|id| id.starts_with("asset-hub-polkadot"))
+        .unwrap_or_default();
     if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) {
         let keys: Vec<GenesisNodeKey> = nodes
             .iter()
-            .map(|node| get_node_keys(node, use_stash))
+            .map(|node| get_node_keys(node, use_stash, asset_hub_polkadot))
             .collect();
         val["session"]["keys"] = json!(keys);
     } else {
@@ -934,6 +942,17 @@ fn add_hrmp_channels(
 ) {
     if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) {
         if let Some(preopen_hrmp_channels) = val.pointer_mut("/hrmp/preopenHrmpChannels") {
+            let hrmp_channels = hrmp_channels
+                .iter()
+                .map(|c| {
+                    (
+                        c.sender(),
+                        c.recipient(),
+                        c.max_capacity(),
+                        c.max_message_size(),
+                    )
+                })
+                .collect::<Vec<_>>();
             *preopen_hrmp_channels = json!(hrmp_channels);
         } else {
             warn!("⚠️ 'hrmp/preopenHrmpChannels' key not present in runtime config.");
@@ -1174,8 +1193,10 @@ mod tests {
             .unwrap();
 
         assert_eq!(new_hrmp_channels.len(), 2);
-        assert_eq!(new_hrmp_channels.first().unwrap()["sender"], 100);
-        assert_eq!(new_hrmp_channels.first().unwrap()["recipient"], 101);
+        assert_eq!(new_hrmp_channels.first().unwrap()[0], 100);
+        assert_eq!(new_hrmp_channels.first().unwrap()[1], 101);
+        assert_eq!(new_hrmp_channels.last().unwrap()[0], 101);
+        assert_eq!(new_hrmp_channels.last().unwrap()[1], 100);
     }
 
     #[test]
@@ -1203,4 +1224,71 @@ mod tests {
         // assert 'preopenHrmpChannels' is not created
         assert_eq!(new_hrmp_channels, None);
     }
+
+    #[test]
+    fn get_node_keys_works() {
+        let mut name = String::from("luca");
+        let seed = format!("//{}{name}", name.remove(0).to_uppercase());
+        let accounts = NodeAccounts {
+            accounts: generators::generate_node_keys(&seed).unwrap(),
+            seed,
+        };
+        let node = NodeSpec {
+            name,
+            accounts,
+            ..Default::default()
+        };
+
+        let sr = &node.accounts.accounts["sr"];
+        let keys = [
+            ("babe".into(), sr.address.clone()),
+            ("im_online".into(), sr.address.clone()),
+            ("parachain_validator".into(), sr.address.clone()),
+            ("authority_discovery".into(), sr.address.clone()),
+            ("para_validator".into(), sr.address.clone()),
+            ("para_assignment".into(), sr.address.clone()),
+            ("aura".into(), sr.address.clone()),
+            ("nimbus".into(), sr.address.clone()),
+            ("vrf".into(), sr.address.clone()),
+            (
+                "grandpa".into(),
+                node.accounts.accounts["ed"].address.clone(),
+            ),
+            ("beefy".into(), node.accounts.accounts["ec"].address.clone()),
+        ]
+        .into();
+
+        // Stash
+        let sr_stash = &node.accounts.accounts["sr_stash"];
+        let node_key = get_node_keys(&node, true, false);
+        assert_eq!(node_key.0, sr_stash.address);
+        assert_eq!(node_key.1, sr_stash.address);
+        assert_eq!(node_key.2, keys);
+        // Non-stash
+        let node_key = get_node_keys(&node, false, false);
+        assert_eq!(node_key.0, sr.address);
+        assert_eq!(node_key.1, sr.address);
+        assert_eq!(node_key.2, keys);
+    }
+
+    #[test]
+    fn get_node_keys_supports_asset_hub_polkadot() {
+        let mut name = String::from("luca");
+        let seed = format!("//{}{name}", name.remove(0).to_uppercase());
+        let accounts = NodeAccounts {
+            accounts: generators::generate_node_keys(&seed).unwrap(),
+            seed,
+        };
+        let node = NodeSpec {
+            name,
+            accounts,
+            ..Default::default()
+        };
+
+        let node_key = get_node_keys(&node, false, false);
+        assert_eq!(node_key.2["aura"], node.accounts.accounts["sr"].address);
+
+        let node_key = get_node_keys(&node, false, true);
+        assert_eq!(node_key.2["aura"], node.accounts.accounts["ed"].address);
+    }
 }
diff --git a/crates/orchestrator/src/generators/keystore.rs b/crates/orchestrator/src/generators/keystore.rs
index 16eae1ce..0e64ce90 100644
--- a/crates/orchestrator/src/generators/keystore.rs
+++ b/crates/orchestrator/src/generators/keystore.rs
@@ -20,6 +20,7 @@ pub async fn generate<'a, T>(
     acc: &NodeAccounts,
     node_files_path: impl AsRef<Path>,
     scoped_fs: &ScopedFilesystem<'a, T>,
+    asset_hub_polkadot: bool,
 ) -> Result<Vec<String>, GeneratorError>
 where
     T: FileSystem,
@@ -32,10 +33,15 @@ where
 
         // let filename = encode(k);
         let filename = match k {
-            // TODO: add logic for isAssetHubPolkadot
-            // "aura" => {
-            //     ""
-            // },
+            "aura" if asset_hub_polkadot => {
+                let pk = acc
+                    .accounts
+                    .get("ed")
+                    .expect(&format!("Key 'ed' should be set for node {THIS_IS_A_BUG}"))
+                    .public_key
+                    .as_str();
+                format!("{}{}", encode(k), pk)
+            },
             "babe" | "imon" | "audi" | "asgn" | "para" | "nmbs" | "rand" | "aura" => {
                 let pk = acc
                     .accounts
diff --git a/crates/orchestrator/src/lib.rs b/crates/orchestrator/src/lib.rs
index b0a78231..8cd98f73 100644
--- a/crates/orchestrator/src/lib.rs
+++ b/crates/orchestrator/src/lib.rs
@@ -12,6 +12,7 @@ pub mod shared;
 mod spawner;
 
 use std::{
+    collections::HashSet,
     net::IpAddr,
     path::{Path, PathBuf},
     time::Duration,
@@ -411,36 +412,102 @@ fn validate_spec_with_provider_capabilities(
     network_spec: &NetworkSpec,
     capabilities: &ProviderCapabilities,
 ) -> Result<(), anyhow::Error> {
-    if !capabilities.requires_image {
-        return Ok(());
-    }
-
-    // Relaychain
-    if network_spec.relaychain.default_image.is_none() {
-        // we should check if each node have an image
-        let nodes = &network_spec.relaychain.nodes;
-        if nodes.iter().any(|node| node.image.is_none()) {
-            return Err(anyhow::anyhow!(
-                "missing image for node, and not default is set at relaychain"
-            ));
-        }
-    };
+    let mut errs: Vec<String> = vec![];
 
-    // Paras
-    for para in &network_spec.parachains {
-        if para.default_image.is_none() {
-            let nodes = &para.collators;
+    if capabilities.requires_image {
+        // Relaychain
+        if network_spec.relaychain.default_image.is_none() {
+            // we should check if each node have an image
+            let nodes = &network_spec.relaychain.nodes;
             if nodes.iter().any(|node| node.image.is_none()) {
-                return Err(anyhow::anyhow!(
-                    "missing image for node, and not default is set at parachain {}",
-                    para.id
+                errs.push(String::from(
+                    "Missing image for node, and not default is set at relaychain",
                 ));
             }
+        };
+
+        // Paras
+        for para in &network_spec.parachains {
+            if para.default_image.is_none() {
+                let nodes = &para.collators;
+                if nodes.iter().any(|node| node.image.is_none()) {
+                    errs.push(format!(
+                        "Missing image for node, and not default is set at parachain {}",
+                        para.id
+                    ));
+                }
+            }
+        }
+    } else {
+        // native
+        // We need to get all the `cmds` and verify if are part of the path
+        let mut cmds: HashSet<&str> = Default::default();
+        if let Some(cmd) = network_spec.relaychain.default_command.as_ref() {
+            cmds.insert(cmd.as_str());
+        }
+        for node in network_spec.relaychain().nodes.iter() {
+            cmds.insert(node.command());
+        }
+
+        // Paras
+        for para in &network_spec.parachains {
+            if let Some(cmd) = para.default_command.as_ref() {
+                cmds.insert(cmd.as_str());
+            }
+
+            for node in para.collators.iter() {
+                cmds.insert(node.command());
+            }
         }
+
+        // now check the binaries
+        let path = std::env::var("PATH").unwrap_or_default(); // path should always be set
+        trace!("current PATH: {path}");
+        let parts: Vec<_> = path.split(":").collect();
+        for cmd in cmds {
+            let missing = if cmd.contains('/') {
+                trace!("checking {cmd}");
+                std::fs::metadata(cmd).is_err()
+            } else {
+                // should be in the PATH
+                !parts.iter().any(|part| {
+                    let path_to = format!("{}/{}", part, cmd);
+                    trace!("checking {path_to}");
+                    std::fs::metadata(path_to).is_ok()
+                })
+            };
+
+            if missing {
+                errs.push(help_msg(cmd));
+            }
+        }
+    }
+
+    if !errs.is_empty() {
+        let msg = errs.join("\n");
+        return Err(anyhow::anyhow!(format!("Invalid configuration: \n {msg}")));
     }
 
     Ok(())
 }
+
+fn help_msg(cmd: &str) -> String {
+    match cmd {
+        "parachain-template-node" | "solochain-template-node" | "minimal-template-node" => {
+            format!("Missing binary {cmd}, compile by running: \n\tcargo build --package {cmd} --release")
+        },
+        "polkadot" => {
+            format!("Missing binary {cmd}, compile by running (in the polkadot-sdk repo): \n\t cargo build --locked --release --features fast-runtime --bin {cmd} --bin polkadot-prepare-worker --bin polkadot-execute-worker")
+        },
+        "polkadot-parachain" => {
+            format!("Missing binary {cmd}, compile by running (in the polkadot-sdk repo): \n\t cargo build --release --locked -p {cmd}-bin --bin {cmd}")
+        },
+        _ => {
+            format!("Missing binary {cmd}, please compile it.")
+        },
+    }
+}
+
 // TODO: get the fs from `DynNamespace` will make this not needed
 // but the FileSystem trait isn't object-safe so we can't pass around
 // as `dyn FileSystem`. We can refactor or using some `erase` techniques
@@ -543,12 +610,15 @@ mod tests {
 
     use super::*;
 
-    fn generate(with_image: bool) -> Result<NetworkConfig, Vec<anyhow::Error>> {
+    fn generate(
+        with_image: bool,
+        with_cmd: Option<&'static str>,
+    ) -> Result<NetworkConfig, Vec<anyhow::Error>> {
         NetworkConfigBuilder::new()
             .with_relaychain(|r| {
                 let mut relay = r
                     .with_chain("rococo-local")
-                    .with_default_command("polkadot");
+                    .with_default_command(with_cmd.unwrap_or("polkadot"));
                 if with_image {
                     relay = relay.with_default_image("docker.io/parity/polkadot")
                 }
@@ -559,7 +629,9 @@ mod tests {
             })
             .with_parachain(|p| {
                 p.with_id(2000).cumulus_based(true).with_collator(|n| {
-                    let node = n.with_name("collator").with_command("polkadot-parachain");
+                    let node = n
+                        .with_name("collator")
+                        .with_command(with_cmd.unwrap_or("polkadot-parachain"));
                     if with_image {
                         node.with_image("docker.io/paritypr/test-parachain")
                     } else {
@@ -572,7 +644,7 @@ mod tests {
 
     #[tokio::test]
     async fn valid_config_with_image() {
-        let network_config = generate(true).unwrap();
+        let network_config = generate(true, None).unwrap();
         let spec = NetworkSpec::from_config(&network_config).await.unwrap();
         let caps = ProviderCapabilities {
             requires_image: true,
@@ -586,8 +658,8 @@ mod tests {
     }
 
     #[tokio::test]
-    async fn invalid_config() {
-        let network_config = generate(false).unwrap();
+    async fn invalid_config_without_image() {
+        let network_config = generate(false, None).unwrap();
         let spec = NetworkSpec::from_config(&network_config).await.unwrap();
         let caps = ProviderCapabilities {
             requires_image: true,
@@ -599,4 +671,35 @@ mod tests {
         let valid = validate_spec_with_provider_capabilities(&spec, &caps);
         assert!(valid.is_err())
     }
+
+    #[tokio::test]
+    async fn invalid_config_missing_cmd() {
+        let network_config = generate(false, Some("other")).unwrap();
+        let spec = NetworkSpec::from_config(&network_config).await.unwrap();
+        let caps = ProviderCapabilities {
+            requires_image: false,
+            has_resources: false,
+            prefix_with_full_path: false,
+            use_default_ports_in_cmd: false,
+        };
+
+        let valid = validate_spec_with_provider_capabilities(&spec, &caps);
+        assert!(valid.is_err())
+    }
+
+    #[tokio::test]
+    async fn valid_config_present_cmd() {
+        let network_config = generate(false, Some("cargo")).unwrap();
+        let spec = NetworkSpec::from_config(&network_config).await.unwrap();
+        let caps = ProviderCapabilities {
+            requires_image: false,
+            has_resources: false,
+            prefix_with_full_path: false,
+            use_default_ports_in_cmd: false,
+        };
+
+        let valid = validate_spec_with_provider_capabilities(&spec, &caps);
+        println!("{:?}", valid);
+        assert!(valid.is_ok())
+    }
 }
diff --git a/crates/orchestrator/src/network/node.rs b/crates/orchestrator/src/network/node.rs
index a4677a3b..fc6a5951 100644
--- a/crates/orchestrator/src/network/node.rs
+++ b/crates/orchestrator/src/network/node.rs
@@ -7,7 +7,7 @@ use provider::DynNode;
 use regex::Regex;
 use serde::Serialize;
 use subxt::{backend::rpc::RpcClient, OnlineClient};
-use support::net::wait_ws_ready;
+use support::net::{skip_err_while_waiting, wait_ws_ready};
 use thiserror::Error;
 use tokio::sync::RwLock;
 use tracing::{debug, trace};
@@ -206,27 +206,22 @@ impl NetworkNode {
                         return Ok(());
                     }
                 },
-                Err(e) => {
-                    match e.downcast::<reqwest::Error>() {
-                        Ok(io) => {
-                            // if the error is connecting could be the case that the node
-                            // is not listening yet, so we keep waiting
-                            // Skipped err is: 'tcp connect error: Connection refused (os error 61)'
-                            if !io.is_connect() {
-                                return Err(io.into());
-                            }
-                        },
-                        Err(other) => {
-                            match other.downcast::<NetworkNodeError>() {
-                                Ok(node_err) => {
-                                    if !matches!(node_err, NetworkNodeError::MetricNotFound(_)) {
-                                        return Err(node_err.into());
-                                    }
-                                },
-                                Err(other) => return Err(other),
-                            };
-                        },
-                    }
+                Err(e) => match e.downcast::<reqwest::Error>() {
+                    Ok(io_err) => {
+                        if !skip_err_while_waiting(&io_err) {
+                            return Err(io_err.into());
+                        }
+                    },
+                    Err(other) => {
+                        match other.downcast::<NetworkNodeError>() {
+                            Ok(node_err) => {
+                                if !matches!(node_err, NetworkNodeError::MetricNotFound(_)) {
+                                    return Err(node_err.into());
+                                }
+                            },
+                            Err(other) => return Err(other),
+                        };
+                    },
                 },
             }
 
diff --git a/crates/orchestrator/src/network_spec.rs b/crates/orchestrator/src/network_spec.rs
index 3c8285cf..1bbeecbb 100644
--- a/crates/orchestrator/src/network_spec.rs
+++ b/crates/orchestrator/src/network_spec.rs
@@ -49,16 +49,25 @@ impl NetworkSpec {
             }
         }
 
-        Ok(NetworkSpec {
-            relaychain,
-            parachains,
-            hrmp_channels: network_config
-                .hrmp_channels()
+        if errs.is_empty() {
+            Ok(NetworkSpec {
+                relaychain,
+                parachains,
+                hrmp_channels: network_config
+                    .hrmp_channels()
+                    .into_iter()
+                    .cloned()
+                    .collect(),
+                global_settings: network_config.global_settings().clone(),
+            })
+        } else {
+            let errs_str = errs
                 .into_iter()
-                .cloned()
-                .collect(),
-            global_settings: network_config.global_settings().clone(),
-        })
+                .map(|e| e.to_string())
+                .collect::<Vec<String>>()
+                .join("\n");
+            Err(OrchestratorError::InvalidConfig(errs_str))
+        }
     }
 
     pub async fn populate_nodes_available_args(
diff --git a/crates/orchestrator/src/network_spec/parachain.rs b/crates/orchestrator/src/network_spec/parachain.rs
index 3b38d25a..4bea46b7 100644
--- a/crates/orchestrator/src/network_spec/parachain.rs
+++ b/crates/orchestrator/src/network_spec/parachain.rs
@@ -77,14 +77,15 @@ impl ParachainSpec {
             cmd
         } else if let Some(first_node) = config.collators().first() {
             let Some(cmd) = first_node.command() else {
-                return Err(OrchestratorError::InvalidConfig("Parachain, either default_command or command in the first node needs to be set.".to_string()));
+                return Err(OrchestratorError::InvalidConfig(format!("Parachain {}, either default_command or command in the first node needs to be set.", config.id())));
             };
 
             cmd
         } else {
-            return Err(OrchestratorError::InvalidConfig(
-                "Parachain without nodes and default_command isn't set.".to_string(),
-            ));
+            return Err(OrchestratorError::InvalidConfig(format!(
+                "Parachain {}, without nodes and default_command isn't set.",
+                config.id()
+            )));
         };
 
         // TODO: internally we use image as String
diff --git a/crates/orchestrator/src/spawner.rs b/crates/orchestrator/src/spawner.rs
index 14195583..2ae65c04 100644
--- a/crates/orchestrator/src/spawner.rs
+++ b/crates/orchestrator/src/spawner.rs
@@ -61,10 +61,18 @@ where
     } else {
         node.name.clone()
     };
-    let key_filenames =
-        generators::generate_node_keystore(&node.accounts, &node_files_path, ctx.scoped_fs)
-            .await
-            .unwrap();
+    let asset_hub_polkadot = ctx
+        .parachain_id
+        .map(|id| id.starts_with("asset-hub-polkadot"))
+        .unwrap_or_default();
+    let key_filenames = generators::generate_node_keystore(
+        &node.accounts,
+        &node_files_path,
+        ctx.scoped_fs,
+        asset_hub_polkadot,
+    )
+    .await
+    .unwrap();
 
     // Paths returned are relative to the base dir, we need to convert into
     // fullpaths to inject them in the nodes.
diff --git a/crates/provider/src/docker/node.rs b/crates/provider/src/docker/node.rs
index 4828b6e2..50c1a69b 100644
--- a/crates/provider/src/docker/node.rs
+++ b/crates/provider/src/docker/node.rs
@@ -392,7 +392,7 @@ where
             .map_err(|err| {
                 ProviderError::RunCommandError(
                     format!("sh -c {}", &command.join(" ")),
-                    self.name.to_string(),
+                    format!("in pod {}", self.name),
                     err.into(),
                 )
             })
diff --git a/crates/provider/src/kubernetes/node.rs b/crates/provider/src/kubernetes/node.rs
index 0fa419c6..709e2bf5 100644
--- a/crates/provider/src/kubernetes/node.rs
+++ b/crates/provider/src/kubernetes/node.rs
@@ -512,7 +512,7 @@ where
             .map_err(|err| {
                 ProviderError::RunCommandError(
                     format!("sh -c {}", &command.join(" ")),
-                    self.name.to_string(),
+                    format!("in pod {}", self.name),
                     err.into(),
                 )
             })
diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs
index 2e87cfb1..d52ec7ef 100644
--- a/crates/provider/src/lib.rs
+++ b/crates/provider/src/lib.rs
@@ -34,7 +34,7 @@ pub enum ProviderError {
     #[error("Failed to spawn node '{0}': {1}")]
     NodeSpawningFailed(String, anyhow::Error),
 
-    #[error("Error running command '{0}' in pod {1}: {2}")]
+    #[error("Error running command '{0}' {1}: {2}")]
     RunCommandError(String, String, anyhow::Error),
 
     #[error("Error running script'{0}': {1}")]
diff --git a/crates/provider/src/native/node.rs b/crates/provider/src/native/node.rs
index 7faee955..f4dc44d2 100644
--- a/crates/provider/src/native/node.rs
+++ b/crates/provider/src/native/node.rs
@@ -442,7 +442,7 @@ where
             .map_err(|err| {
                 ProviderError::RunCommandError(
                     format!("{} {}", &options.program, &options.args.join(" ")),
-                    options.program,
+                    "locally".to_string(),
                     err.into(),
                 )
             })?;
diff --git a/crates/support/src/net.rs b/crates/support/src/net.rs
index cf6ddcb7..9631fc83 100644
--- a/crates/support/src/net.rs
+++ b/crates/support/src/net.rs
@@ -35,13 +35,7 @@ pub async fn wait_ws_ready(url: &str) -> Result<()> {
                 trace!("http_client status: {}, continuing...", res.status());
             },
             Err(e) => {
-                // if the error is connecting/request could be the case that the node
-                // is not listening yet, so we keep waiting
-                // Skipped errs like:
-                // 'tcp connect error: Connection refused (os error 61)'
-                // 'operation was canceled: connection closed before message completed'
-                // 'connection error: Connection reset by peer (os error 54)'
-                if !(e.is_connect() || e.is_request()) {
+                if !skip_err_while_waiting(&e) {
                     return Err(e.into());
                 }
 
@@ -54,3 +48,13 @@ pub async fn wait_ws_ready(url: &str) -> Result<()> {
 
     Ok(())
 }
+
+pub fn skip_err_while_waiting(e: &reqwest::Error) -> bool {
+    // if the error is connecting/request could be the case that the node
+    // is not listening yet, so we keep waiting
+    // Skipped errs like:
+    // 'tcp connect error: Connection refused (os error 61)'
+    // 'operation was canceled: connection closed before message completed'
+    // 'connection error: Connection reset by peer (os error 54)'
+    e.is_connect() || e.is_request()
+}