From b29e83ac7c4ade6fe055afc56deaee707b6471ff Mon Sep 17 00:00:00 2001 From: Maksym Arutyunyan <103510076+maksymar@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:01:44 +0200 Subject: [PATCH 1/4] feat: enable allowed_viewers feature for canister log visibility (#2244) This PR enables `allowed_viewers` for canister log visibility. EXC-1697 --- rs/config/src/execution_environment.rs | 2 +- rs/execution_environment/tests/canister_logging.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/rs/config/src/execution_environment.rs b/rs/config/src/execution_environment.rs index c9e68687c4d..e55768e39bc 100644 --- a/rs/config/src/execution_environment.rs +++ b/rs/config/src/execution_environment.rs @@ -360,7 +360,7 @@ impl Default for Config { dirty_page_logging: FlagStatus::Disabled, max_canister_http_requests_in_flight: MAX_CANISTER_HTTP_REQUESTS_IN_FLIGHT, default_wasm_memory_limit: DEFAULT_WASM_MEMORY_LIMIT, - allowed_viewers_feature: FlagStatus::Disabled, + allowed_viewers_feature: FlagStatus::Enabled, } } } diff --git a/rs/execution_environment/tests/canister_logging.rs b/rs/execution_environment/tests/canister_logging.rs index 6e697e1d432..afd2bc2a9a7 100644 --- a/rs/execution_environment/tests/canister_logging.rs +++ b/rs/execution_environment/tests/canister_logging.rs @@ -280,8 +280,7 @@ fn test_log_visibility_of_fetch_canister_logs() { ( LogVisibilityV2::AllowedViewers(allowed_viewers.clone()), allowed_viewer, - // TODO(EXC-1675): when disabled works as for controllers, change to ok when enabled. - not_allowed_error(&allowed_viewer), + ok.clone(), ), ( LogVisibilityV2::AllowedViewers(allowed_viewers.clone()), From 993fc858670783db5977100137e343e60418d75d Mon Sep 17 00:00:00 2001 From: nabdullindfinity <135595192+nabdullindfinity@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:56:02 +0200 Subject: [PATCH 2/4] feat: initial draft of custom metric tool and its systemd timer (#1963) To support the performance monitoring on mainnet, add a tool where custom metrics can be calculated and exported to prometheus's `node_exporter` through the `textfile` collector. Currently, the total number of TLB shootdowns across all CPUs will be exposed as `sum_tlb_shootdowns`, collected once per minute, as the latest `node_exporter` does not allow filtering of data of its built-in `interrupts` collector that could otherwise do it for us (until https://github.com/prometheus/node_exporter/pull/3028 is included in the release branches) and will add many metrics with high cardinality otherwise. 
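For illustration, here is a minimal sketch of how the new `ic_metrics_tool` crate introduced below is meant to be driven (the shootdown count is made up; the metric name, annotation, and output path are taken from the patch):

use ic_metrics_tool::{Metric, MetricsWriter};

fn main() -> std::io::Result<()> {
    // Illustrative value; the real tool sums the "TLB shootdowns" row of /proc/interrupts.
    let shootdowns = 42.0;
    let metric = Metric::new("sum_tlb_shootdowns", shootdowns)
        .add_annotation("Total TLB shootdowns");
    // node_exporter's textfile collector then scrapes the resulting file, which contains:
    //   # HELP sum_tlb_shootdowns Total TLB shootdowns
    //   # TYPE sum_tlb_shootdowns counter
    //   sum_tlb_shootdowns 42
    MetricsWriter::new("/run/node_exporter/collector_textfile/custom_metrics.prom")
        .write_metrics(&[metric])
}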
NODE-1445 --- Cargo.lock | 8 + Cargo.toml | 1 + ic-os/components/guestos.bzl | 2 + .../custom-metrics/metrics_tool.service | 33 ++++ .../custom-metrics/metrics_tool.timer | 10 + ic-os/guestos/defs.bzl | 1 + rs/ic_os/metrics_tool/BUILD.bazel | 54 +++++ rs/ic_os/metrics_tool/Cargo.toml | 12 ++ rs/ic_os/metrics_tool/src/lib.rs | 187 ++++++++++++++++++ rs/ic_os/metrics_tool/src/main.rs | 62 ++++++ rs/ic_os/release/BUILD.bazel | 1 + 11 files changed, 371 insertions(+) create mode 100644 ic-os/components/monitoring/custom-metrics/metrics_tool.service create mode 100644 ic-os/components/monitoring/custom-metrics/metrics_tool.timer create mode 100644 rs/ic_os/metrics_tool/BUILD.bazel create mode 100644 rs/ic_os/metrics_tool/Cargo.toml create mode 100644 rs/ic_os/metrics_tool/src/lib.rs create mode 100644 rs/ic_os/metrics_tool/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 90a6ea8321a..83e39dd41c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9586,6 +9586,14 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b5c7628eac357aecda461130f8074468be5aa4d258a002032d82d817f79f1f8" +[[package]] +name = "ic-metrics-tool" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap 4.5.20", +] + [[package]] name = "ic-nervous-system-agent" version = "0.0.1" diff --git a/Cargo.toml b/Cargo.toml index 3cec060dbc2..bd2a68f8f09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -126,6 +126,7 @@ members = [ "rs/ic_os/build_tools/diroid", "rs/ic_os/config", "rs/ic_os/fstrim_tool", + "rs/ic_os/metrics_tool", "rs/ic_os/os_tools/guestos_tool", "rs/ic_os/os_tools/hostos_tool", "rs/ic_os/build_tools/inject_files", diff --git a/ic-os/components/guestos.bzl b/ic-os/components/guestos.bzl index 5bf638bafc7..c21dcbf4976 100644 --- a/ic-os/components/guestos.bzl +++ b/ic-os/components/guestos.bzl @@ -85,6 +85,8 @@ component_files = { Label("monitoring/journald.conf"): "/etc/systemd/journald.conf", Label("monitoring/nft-exporter/nft-exporter.service"): "/etc/systemd/system/nft-exporter.service", Label("monitoring/nft-exporter/nft-exporter.timer"): "/etc/systemd/system/nft-exporter.timer", + Label("monitoring/custom-metrics/metrics_tool.service"): "/etc/systemd/system/metrics_tool.service", + Label("monitoring/custom-metrics/metrics_tool.timer"): "/etc/systemd/system/metrics_tool.timer", # networking Label("networking/generate-network-config/guestos/generate-network-config.service"): "/etc/systemd/system/generate-network-config.service", diff --git a/ic-os/components/monitoring/custom-metrics/metrics_tool.service b/ic-os/components/monitoring/custom-metrics/metrics_tool.service new file mode 100644 index 00000000000..c03e1125ab8 --- /dev/null +++ b/ic-os/components/monitoring/custom-metrics/metrics_tool.service @@ -0,0 +1,33 @@ +[Unit] +Description=Report custom metrics once per minute + +[Service] +Type=oneshot +ExecStart=/opt/ic/bin/metrics_tool --metrics /run/node_exporter/collector_textfile/custom_metrics.prom +DeviceAllow=/dev/vda +IPAddressDeny=any +LockPersonality=yes +MemoryDenyWriteExecute=yes +NoNewPrivileges=yes +PrivateDevices=no +PrivateNetwork=yes +PrivateTmp=yes +PrivateUsers=no +ProtectClock=yes +ProtectControlGroups=yes +ProtectHome=yes +ProtectHostname=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +ProtectSystem=strict +ReadOnlyPaths=/proc/interrupts +ReadWritePaths=/run/node_exporter/collector_textfile +RestrictAddressFamilies=AF_UNIX +RestrictAddressFamilies=~AF_UNIX +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes 
+SystemCallArchitectures=native +SystemCallErrorNumber=EPERM +SystemCallFilter=@system-service +UMask=022 diff --git a/ic-os/components/monitoring/custom-metrics/metrics_tool.timer b/ic-os/components/monitoring/custom-metrics/metrics_tool.timer new file mode 100644 index 00000000000..7015869a5f0 --- /dev/null +++ b/ic-os/components/monitoring/custom-metrics/metrics_tool.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Collect custom metrics every minute + +[Timer] +OnBootSec=60s +OnUnitActiveSec=60s +Unit=metrics_tool.service + +[Install] +WantedBy=timers.target diff --git a/ic-os/guestos/defs.bzl b/ic-os/guestos/defs.bzl index 30e3d49787b..177302685d2 100644 --- a/ic-os/guestos/defs.bzl +++ b/ic-os/guestos/defs.bzl @@ -50,6 +50,7 @@ def image_deps(mode, malicious = False): "//cpp:infogetty": "/opt/ic/bin/infogetty:0755", # Terminal manager that replaces the login shell. "//cpp:prestorecon": "/opt/ic/bin/prestorecon:0755", # Parallel restorecon replacement for filesystem relabeling. "//rs/ic_os/release:metrics-proxy": "/opt/ic/bin/metrics-proxy:0755", # Proxies, filters, and serves public node metrics. + "//rs/ic_os/release:metrics_tool": "/opt/ic/bin/metrics_tool:0755", # Collects and reports custom metrics. # additional libraries to install "//rs/ic_os/release:nss_icos": "/usr/lib/x86_64-linux-gnu/libnss_icos.so.2:0644", # Allows referring to the guest IPv6 by name guestos from host, and host as hostos from guest. diff --git a/rs/ic_os/metrics_tool/BUILD.bazel b/rs/ic_os/metrics_tool/BUILD.bazel new file mode 100644 index 00000000000..b182a7a8a93 --- /dev/null +++ b/rs/ic_os/metrics_tool/BUILD.bazel @@ -0,0 +1,54 @@ +load("@rules_rust//rust:defs.bzl", "rust_binary", "rust_library", "rust_test", "rust_test_suite") + +package(default_visibility = ["//rs:ic-os-pkg"]) + +DEPENDENCIES = [ + # Keep sorted. + "//rs/sys", + "@crate_index//:anyhow", + "@crate_index//:clap", +] + +DEV_DEPENDENCIES = [ + # Keep sorted. 
+] + +MACRO_DEPENDENCIES = [] + +ALIASES = {} + +rust_library( + name = "metrics_tool", + srcs = glob( + ["src/**/*.rs"], + exclude = ["src/main.rs"], + ), + aliases = ALIASES, + crate_name = "ic_metrics_tool", + proc_macro_deps = MACRO_DEPENDENCIES, + visibility = ["//rs:system-tests-pkg"], + deps = DEPENDENCIES, +) + +rust_binary( + name = "metrics_tool_bin", + srcs = ["src/main.rs"], + aliases = ALIASES, + proc_macro_deps = MACRO_DEPENDENCIES, + deps = DEPENDENCIES + [":metrics_tool"], +) + +rust_test( + name = "metrics_tool_test", + crate = ":metrics_tool", + deps = DEPENDENCIES + DEV_DEPENDENCIES, +) + +rust_test_suite( + name = "metrics_tool_integration", + srcs = glob(["tests/**/*.rs"]), + target_compatible_with = [ + "@platforms//os:linux", + ], + deps = [":metrics_tool_bin"] + DEPENDENCIES + DEV_DEPENDENCIES, +) diff --git a/rs/ic_os/metrics_tool/Cargo.toml b/rs/ic_os/metrics_tool/Cargo.toml new file mode 100644 index 00000000000..4e6b604e829 --- /dev/null +++ b/rs/ic_os/metrics_tool/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "ic-metrics-tool" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "metrics_tool" +path = "src/main.rs" + +[dependencies] +anyhow = { workspace = true } +clap = { workspace = true } \ No newline at end of file diff --git a/rs/ic_os/metrics_tool/src/lib.rs b/rs/ic_os/metrics_tool/src/lib.rs new file mode 100644 index 00000000000..02bb129c55d --- /dev/null +++ b/rs/ic_os/metrics_tool/src/lib.rs @@ -0,0 +1,187 @@ +// TODO: refactor/merge this with fstrim_tool and guestos_tool metrics functionality +use std::fs::File; +use std::io::{self, Write}; +use std::path::Path; + +// TODO: everything is floating point for now +pub struct Metric { + name: String, + value: f64, + annotation: String, + labels: Vec<(String, String)>, +} + +impl Metric { + pub fn new(name: &str, value: f64) -> Self { + Self { + name: name.to_string(), + value, + annotation: "Custom metric".to_string(), + labels: Vec::new(), + } + } + pub fn with_annotation(name: &str, value: f64, annotation: &str) -> Self { + Self { + name: name.to_string(), + value, + annotation: annotation.to_string(), + labels: Vec::new(), + } + } + + pub fn add_annotation(mut self, annotation: &str) -> Self { + self.annotation = annotation.to_string(); + self + } + + pub fn add_label(mut self, key: &str, value: &str) -> Self { + self.labels.push((key.to_string(), value.to_string())); + self + } + + // TODO: formatting of floats + // Convert to prometheus exposition format + pub fn to_prom_string(&self) -> String { + let labels_str = if self.labels.is_empty() { + String::new() + } else { + let labels: Vec = self + .labels + .iter() + .map(|(k, v)| format!("{}=\"{}\"", k, v)) + .collect(); + format!("{{{}}}", labels.join(",")) + }; + format!( + "# HELP {} {}\n\ + # TYPE {} counter\n\ + {}{} {}", + self.name, self.annotation, self.name, self.name, labels_str, self.value + ) + } +} + +pub struct MetricsWriter { + file_path: String, +} + +impl MetricsWriter { + pub fn new(file_path: &str) -> Self { + Self { + file_path: file_path.to_string(), + } + } + + pub fn write_metrics(&self, metrics: &[Metric]) -> io::Result<()> { + let path = Path::new(&self.file_path); + let mut file = File::create(path)?; + for metric in metrics { + writeln!(file, "{}", metric.to_prom_string())?; + } + Ok(()) + } +} +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_metric_to_string() { + let metric = Metric::new("test_metric", 123.45) + .add_label("label1", "value1") + .add_label("label2", "value2"); + assert_eq!( + 
metric.to_prom_string(), + "# HELP test_metric Custom metric\n\ + # TYPE test_metric counter\n\ + test_metric{label1=\"value1\",label2=\"value2\"} 123.45" + ); + } + + #[test] + fn test_write_metrics() { + let metrics = vec![ + Metric::new("metric1", 1.0), + Metric::new("metric2", 2.0).add_label("label", "value"), + ]; + let writer = MetricsWriter::new("/tmp/test_metrics.prom"); + writer.write_metrics(&metrics).unwrap(); + let content = std::fs::read_to_string("/tmp/test_metrics.prom").unwrap(); + assert!(content.contains( + "# HELP metric1 Custom metric\n\ + # TYPE metric1 counter\n\ + metric1 1" + )); + assert!(content.contains( + "# HELP metric2 Custom metric\n\ + # TYPE metric2 counter\n\ + metric2{label=\"value\"} 2" + )); + } + + #[test] + fn test_metric_large_value() { + let metric = Metric::new("large_value_metric", 1.0e64); + assert_eq!( + metric.to_prom_string(), + "# HELP large_value_metric Custom metric\n\ + # TYPE large_value_metric counter\n\ + large_value_metric 10000000000000000000000000000000000000000000000000000000000000000" + ); + } + + #[test] + fn test_metric_without_labels() { + let metric = Metric::new("no_label_metric", 42.0); + assert_eq!( + metric.to_prom_string(), + "# HELP no_label_metric Custom metric\n\ + # TYPE no_label_metric counter\n\ + no_label_metric 42" + ); + } + + #[test] + fn test_metric_with_annotation() { + let metric = Metric::with_annotation("annotated_metric", 99.9, "This is a test metric"); + assert_eq!( + metric.to_prom_string(), + "# HELP annotated_metric This is a test metric\n\ + # TYPE annotated_metric counter\n\ + annotated_metric 99.9" + ); + } + + #[test] + fn test_write_empty_metrics() { + let metrics: Vec = Vec::new(); + let writer = MetricsWriter::new("/tmp/test_empty_metrics.prom"); + writer.write_metrics(&metrics).unwrap(); + let content = std::fs::read_to_string("/tmp/test_empty_metrics.prom").unwrap(); + assert!(content.is_empty()); + } + + #[test] + fn test_metric_with_multiple_labels() { + let metric = Metric::new("multi_label_metric", 10.0) + .add_label("foo", "bar") + .add_label("version", "1.0.0"); + assert_eq!( + metric.to_prom_string(), + "# HELP multi_label_metric Custom metric\n\ + # TYPE multi_label_metric counter\n\ + multi_label_metric{foo=\"bar\",version=\"1.0.0\"} 10" + ); + } + + #[test] + fn test_metric_with_empty_annotation() { + let metric = Metric::with_annotation("empty_annotation_metric", 5.5, ""); + assert_eq!( + metric.to_prom_string(), + "# HELP empty_annotation_metric \n\ + # TYPE empty_annotation_metric counter\n\ + empty_annotation_metric 5.5" + ); + } +} diff --git a/rs/ic_os/metrics_tool/src/main.rs b/rs/ic_os/metrics_tool/src/main.rs new file mode 100644 index 00000000000..cfd4a295c3f --- /dev/null +++ b/rs/ic_os/metrics_tool/src/main.rs @@ -0,0 +1,62 @@ +use anyhow::Result; +use clap::Parser; + +use std::fs::File; +use std::io::{self, BufRead}; +use std::path::Path; + +use ic_metrics_tool::{Metric, MetricsWriter}; + +const INTERRUPT_FILTER: &str = "TLB shootdowns"; +const INTERRUPT_SOURCE: &str = "/proc/interrupts"; +const CUSTOM_METRICS_PROM: &str = "/run/node_exporter/collector_textfile/custom_metrics.prom"; +const TLB_SHOOTDOWN_METRIC_NAME: &str = "sum_tlb_shootdowns"; +const TLB_SHOOTDOWN_METRIC_ANNOTATION: &str = "Total TLB shootdowns"; + +#[derive(Parser)] +struct MetricToolArgs { + #[arg( + short = 'm', + long = "metrics", + default_value = CUSTOM_METRICS_PROM + )] + /// Filename to write the prometheus metrics for node_exporter generation. + /// Fails badly if the directory doesn't exist. 
+ metrics_filename: String, +} + +fn get_sum_tlb_shootdowns() -> Result { + let path = Path::new(INTERRUPT_SOURCE); + let file = File::open(path)?; + let reader = io::BufReader::new(file); + + let mut total_tlb_shootdowns = 0; + + for line in reader.lines() { + let line = line?; + if line.contains(INTERRUPT_FILTER) { + for part in line.split_whitespace().skip(1) { + if let Ok(value) = part.parse::() { + total_tlb_shootdowns += value; + } + } + } + } + + Ok(total_tlb_shootdowns) +} + +pub fn main() -> Result<()> { + let opts = MetricToolArgs::parse(); + let mpath = Path::new(&opts.metrics_filename); + let tlb_shootdowns = get_sum_tlb_shootdowns()?; + + let metrics = vec![ + Metric::new(TLB_SHOOTDOWN_METRIC_NAME, tlb_shootdowns as f64) + .add_annotation(TLB_SHOOTDOWN_METRIC_ANNOTATION), + ]; + let writer = MetricsWriter::new(mpath.to_str().unwrap()); + writer.write_metrics(&metrics).unwrap(); + + Ok(()) +} diff --git a/rs/ic_os/release/BUILD.bazel b/rs/ic_os/release/BUILD.bazel index d5a5eb40598..902e58a3082 100644 --- a/rs/ic_os/release/BUILD.bazel +++ b/rs/ic_os/release/BUILD.bazel @@ -13,6 +13,7 @@ OBJECTS = { "vsock_host": "//rs/ic_os/vsock/host:vsock_host", "metrics-proxy": "@crate_index//:metrics-proxy__metrics-proxy", "nss_icos": "//rs/ic_os/nss_icos", + "metrics_tool": "//rs/ic_os/metrics_tool:metrics_tool_bin", } [release_strip_binary( From c705212d5b08b808262cf314d161d658527792b3 Mon Sep 17 00:00:00 2001 From: NikolasHai <113891786+NikolasHai@users.noreply.github.com> Date: Thu, 24 Oct 2024 17:07:16 +0200 Subject: [PATCH 3/4] feat(ICP-Rosetta): FI-1540: add disburse of neuron functionality (#2182) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This MR proposes the following changes: 1. Add the functionality of disbursing a neuron to the ICP Rosetta client --------- Co-authored-by: Mathias Björkqvist --- Cargo.lock | 1 + rs/rosetta-api/icp/BUILD.bazel | 9 +- rs/rosetta-api/icp/Cargo.toml | 1 + rs/rosetta-api/icp/client/src/lib.rs | 135 ++++++-- .../common/system_test_environment.rs | 101 ++++-- .../icp/tests/system_tests/common/utils.rs | 15 + .../test_cases/neuron_management.rs | 162 +++++++++- rs/tests/Cargo.toml | 4 - .../rosetta/BUILD.bazel | 21 -- .../rosetta/rosetta_neuron_disburse_test.rs | 21 -- rs/tests/src/rosetta_tests/tests.rs | 1 - .../rosetta_tests/tests/neuron_disburse.rs | 300 ------------------ 12 files changed, 369 insertions(+), 402 deletions(-) delete mode 100644 rs/tests/financial_integrations/rosetta/rosetta_neuron_disburse_test.rs delete mode 100644 rs/tests/src/rosetta_tests/tests/neuron_disburse.rs diff --git a/Cargo.lock b/Cargo.lock index 83e39dd41c9..91cc39c4a32 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11481,6 +11481,7 @@ dependencies = [ "prost 0.13.3", "rand 0.8.5", "rand_chacha 0.3.1", + "registry-canister", "reqwest 0.12.8", "rolling-file", "rosetta-core", diff --git a/rs/rosetta-api/icp/BUILD.bazel b/rs/rosetta-api/icp/BUILD.bazel index da51dfa29e6..388bfcbdfdb 100644 --- a/rs/rosetta-api/icp/BUILD.bazel +++ b/rs/rosetta-api/icp/BUILD.bazel @@ -79,6 +79,7 @@ DEV_DEPENDENCIES = [ "//rs/nns/governance/init", "//rs/nns/handlers/root/impl:root", "//rs/nns/test_utils", + "//rs/registry/canister", "//rs/rosetta-api/icp:rosetta-api", "//rs/rosetta-api/icp/client:ic-icp-rosetta-client", "//rs/rosetta-api/icp/ledger_canister_blocks_synchronizer/test_utils", @@ -168,9 +169,11 @@ rust_test_suite_with_extra_srcs( "//rs/canister_sandbox", "//rs/canister_sandbox:sandbox_launcher", 
"//rs/ledger_suite/icp/ledger:ledger-canister-wasm-notify-method", - "//rs/nns/governance:governance-canister", + "//rs/nns/governance:governance-canister-test", + "//rs/nns/handlers/lifeline/impl:lifeline_canister", "//rs/nns/handlers/root/impl:root-canister", "//rs/pocket_ic_server:pocket-ic-server", + "//rs/registry/canister:registry-canister", "//rs/replica", "//rs/rosetta-api/icp:ic-rosetta-api-rosetta-blocks", "//rs/rosetta-api/icp:rosetta-api", @@ -184,8 +187,10 @@ rust_test_suite_with_extra_srcs( "ROSETTA_BIN_PATH": "$(rootpath //rs/rosetta-api/icp:ic-rosetta-api-rosetta-blocks)", "SANDBOX_LAUNCHER": "$(rootpath //rs/canister_sandbox:sandbox_launcher)", "ICP_LEDGER_DEPLOYED_VERSION_WASM_PATH": "$(rootpath @mainnet_icp_ledger_canister//file)", - "GOVERNANCE_CANISTER_WASM_PATH": "$(rootpath //rs/nns/governance:governance-canister)", + "GOVERNANCE_CANISTER_WASM_PATH": "$(rootpath //rs/nns/governance:governance-canister-test)", "ROOT_CANISTER_WASM_PATH": "$(rootpath //rs/nns/handlers/root/impl:root-canister)", + "REGISTRY_CANISTER_WASM_PATH": "$(rootpath //rs/registry/canister:registry-canister)", + "LIFELINE_CANISTER_WASM_PATH": "$(rootpath //rs/nns/handlers/lifeline/impl:lifeline_canister)", }, extra_srcs = glob([ "tests/system_tests/common/*.rs", diff --git a/rs/rosetta-api/icp/Cargo.toml b/rs/rosetta-api/icp/Cargo.toml index baf8963db53..a013e7f430c 100644 --- a/rs/rosetta-api/icp/Cargo.toml +++ b/rs/rosetta-api/icp/Cargo.toml @@ -38,6 +38,7 @@ num-bigint = { workspace = true } on_wire = { path = "../../rust_canisters/on_wire" } prometheus = { workspace = true } rand = { workspace = true } +registry-canister = { path = "../../registry/canister" } reqwest = { workspace = true } rolling-file = { workspace = true } rosetta-core = { path = "../common/rosetta_core" } diff --git a/rs/rosetta-api/icp/client/src/lib.rs b/rs/rosetta-api/icp/client/src/lib.rs index e5e0fe455f9..934728fbbc7 100644 --- a/rs/rosetta-api/icp/client/src/lib.rs +++ b/rs/rosetta-api/icp/client/src/lib.rs @@ -11,6 +11,7 @@ use ic_rosetta_api::models::ConstructionMetadataRequestOptions; use ic_rosetta_api::models::ConstructionPayloadsRequestMetadata; use ic_rosetta_api::models::OperationIdentifier; use ic_rosetta_api::request_types::ChangeAutoStakeMaturityMetadata; +use ic_rosetta_api::request_types::DisburseMetadata; use ic_rosetta_api::request_types::NeuronIdentifierMetadata; use ic_rosetta_api::request_types::RequestType; use ic_rosetta_api::request_types::SetDissolveTimestampMetadata; @@ -327,6 +328,35 @@ impl RosettaClient { }]) } + pub async fn build_disburse_neuron_operations( + signer_principal: Principal, + neuron_index: u64, + recipient: Option, + ) -> anyhow::Result> { + Ok(vec![Operation { + operation_identifier: OperationIdentifier { + index: 0, + network_index: None, + }, + related_operations: None, + type_: "DISBURSE".to_string(), + status: None, + account: Some(rosetta_core::identifiers::AccountIdentifier::from( + AccountIdentifier::new(PrincipalId(signer_principal), None), + )), + amount: None, + coin_change: None, + metadata: Some( + DisburseMetadata { + neuron_index, + recipient, + } + .try_into() + .map_err(|e| anyhow::anyhow!("Failed to convert metadata: {:?}", e))?, + ), + }]) + } + pub async fn network_list(&self) -> anyhow::Result { self.call_endpoint("/network/list", &MetadataRequest { metadata: None }) .await @@ -899,6 +929,33 @@ impl RosettaClient { ) .await } + + /// If a neuron is in the state DISSOLVED you can disburse the neuron with this function. 
+ pub async fn disburse_neuron( + &self, + network_identifier: NetworkIdentifier, + signer_keypair: &T, + disburse_neuron_args: RosettaDisburseNeuronArgs, + ) -> anyhow::Result + where + T: RosettaSupportedKeyPair, + { + let disburse_neuron_operations = RosettaClient::build_disburse_neuron_operations( + signer_keypair.generate_principal_id()?.0, + disburse_neuron_args.neuron_index, + disburse_neuron_args.recipient, + ) + .await?; + + self.make_submit_and_wait_for_transaction( + signer_keypair, + network_identifier, + disburse_neuron_operations, + None, + None, + ) + .await + } } pub struct RosettaTransferArgs { @@ -1083,6 +1140,41 @@ impl RosettaIncreaseNeuronStakeArgs { } } +pub struct RosettaIncreaseNeuronStakeArgsBuilder { + additional_stake: Nat, + neuron_index: Option, + // The subaccount from which the ICP should be transferred + from_subaccount: Option<[u8; 32]>, +} + +impl RosettaIncreaseNeuronStakeArgsBuilder { + pub fn new(additional_stake: Nat) -> Self { + Self { + additional_stake, + neuron_index: None, + from_subaccount: None, + } + } + + pub fn with_neuron_index(mut self, neuron_index: u64) -> Self { + self.neuron_index = Some(neuron_index); + self + } + + pub fn with_from_subaccount(mut self, from_subaccount: Subaccount) -> Self { + self.from_subaccount = Some(from_subaccount); + self + } + + pub fn build(self) -> RosettaIncreaseNeuronStakeArgs { + RosettaIncreaseNeuronStakeArgs { + additional_stake: self.additional_stake, + neuron_index: self.neuron_index, + from_subaccount: self.from_subaccount, + } + } +} + pub struct RosettaChangeAutoStakeMaturityArgs { pub neuron_index: Option, pub requested_setting_for_auto_stake_maturity: bool, @@ -1122,38 +1214,39 @@ impl RosettaChangeAutoStakeMaturityArgsBuilder { } } } +pub struct RosettaDisburseNeuronArgs { + pub neuron_index: u64, + pub recipient: Option, +} -pub struct RosettaIncreaseNeuronStakeArgsBuilder { - additional_stake: Nat, - neuron_index: Option, - // The subaccount from which the ICP should be transferred - from_subaccount: Option<[u8; 32]>, +impl RosettaDisburseNeuronArgs { + pub fn builder(neuron_index: u64) -> RosettaDisburseNeuronArgsBuilder { + RosettaDisburseNeuronArgsBuilder::new(neuron_index) + } } -impl RosettaIncreaseNeuronStakeArgsBuilder { - pub fn new(additional_stake: Nat) -> Self { +pub struct RosettaDisburseNeuronArgsBuilder { + neuron_index: u64, + recipient: Option, +} + +impl RosettaDisburseNeuronArgsBuilder { + pub fn new(neuron_index: u64) -> Self { Self { - additional_stake, - neuron_index: None, - from_subaccount: None, + neuron_index, + recipient: None, } } - pub fn with_neuron_index(mut self, neuron_index: u64) -> Self { - self.neuron_index = Some(neuron_index); + pub fn with_recipient(mut self, recipient: AccountIdentifier) -> Self { + self.recipient = Some(recipient); self } - pub fn with_from_subaccount(mut self, from_subaccount: Subaccount) -> Self { - self.from_subaccount = Some(from_subaccount); - self - } - - pub fn build(self) -> RosettaIncreaseNeuronStakeArgs { - RosettaIncreaseNeuronStakeArgs { - additional_stake: self.additional_stake, + pub fn build(self) -> RosettaDisburseNeuronArgs { + RosettaDisburseNeuronArgs { neuron_index: self.neuron_index, - from_subaccount: self.from_subaccount, + recipient: self.recipient, } } } diff --git a/rs/rosetta-api/icp/tests/system_tests/common/system_test_environment.rs b/rs/rosetta-api/icp/tests/system_tests/common/system_test_environment.rs index 4dd59ea837d..a27b8c7e2fa 100644 --- 
a/rs/rosetta-api/icp/tests/system_tests/common/system_test_environment.rs +++ b/rs/rosetta-api/icp/tests/system_tests/common/system_test_environment.rs @@ -1,5 +1,6 @@ use crate::common::utils::get_custom_agent; -use crate::common::utils::wait_for_rosetta_to_sync_up_to_block; +use crate::common::utils::get_test_agent; +use crate::common::utils::wait_for_rosetta_to_catch_up_with_icp_ledger; use crate::common::{ constants::{DEFAULT_INITIAL_BALANCE, STARTING_CYCLES_PER_CANISTER}, utils::test_identity, @@ -16,12 +17,16 @@ use ic_icrc1_test_utils::LedgerEndpointArg; use ic_icrc1_tokens_u256::U256; use ic_ledger_test_utils::build_ledger_wasm; use ic_ledger_test_utils::pocket_ic_helpers::ledger::LEDGER_CANISTER_ID; +use ic_nns_common::init::LifelineCanisterInitPayloadBuilder; use ic_nns_constants::GOVERNANCE_CANISTER_ID; use ic_nns_constants::LIFELINE_CANISTER_ID; +use ic_nns_constants::REGISTRY_CANISTER_ID; use ic_nns_constants::ROOT_CANISTER_ID; use ic_nns_governance_init::GovernanceCanisterInitPayloadBuilder; use ic_nns_handler_root::init::RootCanisterInitPayloadBuilder; use ic_nns_test_utils::common::build_governance_wasm; +use ic_nns_test_utils::common::build_lifeline_wasm; +use ic_nns_test_utils::common::build_registry_wasm; use ic_nns_test_utils::common::build_root_wasm; use ic_rosetta_test_utils::path_from_env; use ic_types::PrincipalId; @@ -32,6 +37,7 @@ use num_traits::cast::ToPrimitive; use pocket_ic::CanisterSettings; use pocket_ic::{nonblocking::PocketIc, PocketIcBuilder}; use prost::Message; +use registry_canister::init::RegistryCanisterInitPayloadBuilder; use rosetta_core::identifiers::NetworkIdentifier; use std::collections::HashMap; use tempfile::TempDir; @@ -132,14 +138,6 @@ impl RosettaTestingEnvironment { } pub async fn restart_rosetta_node(mut self, options: RosettaOptions) -> Self { - let ledger_tip = self - .rosetta_client - .network_status(self.network_identifier.clone()) - .await - .unwrap() - .current_block_identifier - .index; - self.rosetta_context.kill_rosetta_process(); let rosetta_bin = path_from_env("ROSETTA_BIN_PATH"); @@ -149,13 +147,12 @@ impl RosettaTestingEnvironment { self.rosetta_client = RosettaClient::from_str_url(&format!("http://localhost:{}", self.rosetta_context.port)) .expect("Unable to parse url"); - wait_for_rosetta_to_sync_up_to_block( + wait_for_rosetta_to_catch_up_with_icp_ledger( &self.rosetta_client, self.network_identifier.clone(), - ledger_tip, + &get_test_agent(self.pocket_ic.url().unwrap().port().unwrap()).await, ) - .await - .unwrap(); + .await; self } } @@ -280,7 +277,9 @@ impl RosettaTestingEnvironmentBuilder { Some(nns_root_canister_controller), ) .await; - + pocket_ic + .add_cycles(nns_root_canister_id, STARTING_CYCLES_PER_CANISTER) + .await; let governance_canister_wasm = build_governance_wasm(); let governance_canister_id = Principal::from(GOVERNANCE_CANISTER_ID); let governance_canister_controller = ROOT_CANISTER_ID.get().0; @@ -316,6 +315,61 @@ impl RosettaTestingEnvironmentBuilder { .advance_time(std::time::Duration::from_secs(60)) .await; pocket_ic.tick().await; + + let nns_lifeline_canister_wasm = build_lifeline_wasm(); + let nns_lifeline_canister_id = Principal::from(LIFELINE_CANISTER_ID); + let nns_lifeline_canister_controller = ROOT_CANISTER_ID.get().0; + let nns_lifeline_canister = pocket_ic + .create_canister_with_id( + Some(nns_lifeline_canister_controller), + Some(CanisterSettings { + controllers: Some(vec![nns_lifeline_canister_controller]), + ..Default::default() + }), + nns_lifeline_canister_id, + ) + .await + 
.expect("Unable to create the NNS Lifeline canister"); + + pocket_ic + .install_canister( + nns_lifeline_canister, + nns_lifeline_canister_wasm.bytes().to_vec(), + Encode!(&LifelineCanisterInitPayloadBuilder::new().build()).unwrap(), + Some(nns_lifeline_canister_controller), + ) + .await; + pocket_ic + .add_cycles(nns_lifeline_canister_id, STARTING_CYCLES_PER_CANISTER) + .await; + + let nns_registry_canister_wasm = build_registry_wasm(); + let nns_registry_canister_id = Principal::from(REGISTRY_CANISTER_ID); + let nns_registry_canister_controller = ROOT_CANISTER_ID.get().0; + let nns_registry_canister = pocket_ic + .create_canister_with_id( + Some(nns_registry_canister_controller), + Some(CanisterSettings { + controllers: Some(vec![nns_registry_canister_controller]), + ..Default::default() + }), + nns_registry_canister_id, + ) + .await + .expect("Unable to create the NNS Registry canister"); + + pocket_ic + .install_canister( + nns_registry_canister, + nns_registry_canister_wasm.bytes().to_vec(), + Encode!(&RegistryCanisterInitPayloadBuilder::new().build()).unwrap(), + Some(nns_registry_canister_controller), + ) + .await; + + pocket_ic + .add_cycles(nns_registry_canister_id, STARTING_CYCLES_PER_CANISTER) + .await; } let replica_url = pocket_ic.make_live(None).await; @@ -384,19 +438,12 @@ impl RosettaTestingEnvironmentBuilder { .unwrap(); // Wait for rosetta to catch up with the ledger - if let Some(last_block_idx) = block_idxes.last() { - let rosetta_last_block_idx = wait_for_rosetta_to_sync_up_to_block( - &rosetta_client, - network_identifier.clone(), - *last_block_idx, - ) - .await; - assert_eq!( - Some(*last_block_idx), - rosetta_last_block_idx, - "Wait for rosetta sync failed." - ); - } + wait_for_rosetta_to_catch_up_with_icp_ledger( + &rosetta_client, + network_identifier.clone(), + &get_test_agent(replica_port).await, + ) + .await; RosettaTestingEnvironment { pocket_ic, diff --git a/rs/rosetta-api/icp/tests/system_tests/common/utils.rs b/rs/rosetta-api/icp/tests/system_tests/common/utils.rs index ecaf0867c0e..1d98d82455b 100644 --- a/rs/rosetta-api/icp/tests/system_tests/common/utils.rs +++ b/rs/rosetta-api/icp/tests/system_tests/common/utils.rs @@ -10,6 +10,7 @@ use ic_nns_constants::GOVERNANCE_CANISTER_ID; use ic_nns_constants::LEDGER_CANISTER_ID; use ic_nns_governance::pb::v1::ListNeurons; use ic_nns_governance::pb::v1::ListNeuronsResponse; +use ic_nns_governance_api::pb::v1::GovernanceError; use ic_rosetta_api::convert::to_hash; use icp_ledger::GetBlocksArgs; use icp_ledger::QueryEncodedBlocksResponse; @@ -190,3 +191,17 @@ pub async fn list_neurons(agent: &Agent) -> ListNeuronsResponse { ) .unwrap() } + +pub async fn update_neuron(agent: &Agent, neuron: ic_nns_governance_api::pb::v1::Neuron) { + let result = Decode!( + &agent + .update(&GOVERNANCE_CANISTER_ID.into(), "update_neuron") + .with_arg(Encode!(&neuron).unwrap()) + .call_and_wait() + .await + .unwrap(), + Option + ) + .unwrap(); + assert!(result.is_none(), "Failed to update neuron: {:?}", result); +} diff --git a/rs/rosetta-api/icp/tests/system_tests/test_cases/neuron_management.rs b/rs/rosetta-api/icp/tests/system_tests/test_cases/neuron_management.rs index 248b1e8b8b6..c59a09c3453 100644 --- a/rs/rosetta-api/icp/tests/system_tests/test_cases/neuron_management.rs +++ b/rs/rosetta-api/icp/tests/system_tests/test_cases/neuron_management.rs @@ -1,3 +1,4 @@ +use crate::common::utils::update_neuron; use crate::common::utils::wait_for_rosetta_to_catch_up_with_icp_ledger; use crate::common::{ 
system_test_environment::RosettaTestingEnvironment, @@ -6,14 +7,17 @@ use crate::common::{ use ic_agent::{identity::BasicIdentity, Identity}; use ic_icp_rosetta_client::RosettaChangeAutoStakeMaturityArgs; use ic_icp_rosetta_client::RosettaIncreaseNeuronStakeArgs; -use ic_icp_rosetta_client::{RosettaCreateNeuronArgs, RosettaSetNeuronDissolveDelayArgs}; +use ic_icp_rosetta_client::{ + RosettaCreateNeuronArgs, RosettaDisburseNeuronArgs, RosettaSetNeuronDissolveDelayArgs, +}; use ic_nns_governance::pb::v1::neuron::DissolveState; -use ic_rosetta_api::request::transaction_operation_results::TransactionOperationResults; +use ic_rosetta_api::{ + models::AccountBalanceRequest, + request::transaction_operation_results::TransactionOperationResults, +}; use ic_types::PrincipalId; -use icp_ledger::AccountIdentifier; -use icp_ledger::DEFAULT_TRANSFER_FEE; +use icp_ledger::{AccountIdentifier, DEFAULT_TRANSFER_FEE}; use lazy_static::lazy_static; -use rosetta_core::request_types::AccountBalanceRequest; use std::{ sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -499,3 +503,151 @@ fn test_change_auto_stake_maturity() { assert!(neuron.auto_stake_maturity.is_none()); }); } + +#[test] +fn test_disburse_neuron() { + let rt = Runtime::new().unwrap(); + rt.block_on(async { + let initial_balance = 100_000_000_000; + let env = RosettaTestingEnvironment::builder() + .with_initial_balances( + vec![( + AccountIdentifier::from(TEST_IDENTITY.sender().unwrap()), + // A hundred million ICP should be enough + icp_ledger::Tokens::from_e8s(initial_balance), + )] + .into_iter() + .collect(), + ) + .with_governance_canister() + .build() + .await; + + // Stake the minimum amount 100 million e8s + let staked_amount = initial_balance/10; + let neuron_index = 0; + let from_subaccount = [0; 32]; + + env.rosetta_client + .create_neuron( + env.network_identifier.clone(), + &(*TEST_IDENTITY).clone(), + RosettaCreateNeuronArgs::builder(staked_amount.into()) + .with_from_subaccount(from_subaccount) + .with_neuron_index(neuron_index) + .build(), + ) + .await + .unwrap(); + // See if the neuron was created successfully + let agent = get_test_agent(env.pocket_ic.url().unwrap().port().unwrap()).await; + + TransactionOperationResults::try_from( + env.rosetta_client + .start_dissolving_neuron( + env.network_identifier.clone(), + &(*TEST_IDENTITY).clone(), + neuron_index, + ) + .await + .unwrap() + .metadata, + ) + .unwrap(); + + let mut neuron = list_neurons(&agent).await.full_neurons[0].to_owned(); + // If we try to disburse the neuron when it is not yet DISSOLVED we expect an error + match env + .rosetta_client + .disburse_neuron( + env.network_identifier.clone(), + &(*TEST_IDENTITY).clone(), + RosettaDisburseNeuronArgs::builder(neuron_index) + .with_recipient(TEST_IDENTITY.sender().unwrap().into()) + .build(), + ) + .await + { + Err(e) if e.to_string().contains(&format!("Could not disburse: PreconditionFailed: Neuron {} has NOT been dissolved. 
It is in state Dissolving",neuron.id.unwrap().id)) => (), + Err(e) => panic!("Unexpected error: {}", e), + Ok(_) => panic!("Expected an error but got success"), + } + // Let rosetta catch up with the transfer that happended when creating the neuron + wait_for_rosetta_to_catch_up_with_icp_ledger( + &env.rosetta_client, + env.network_identifier.clone(), + &agent, + ).await; + let balance_before_disburse = env + .rosetta_client + .account_balance( + AccountBalanceRequest::builder( + env.network_identifier.clone(), + AccountIdentifier::from(TEST_IDENTITY.sender().unwrap()).into(), + ) + .build(), + ) + .await + .unwrap() + .balances + .first() + .unwrap() + .clone() + .value.parse::().unwrap(); + + // We now update the neuron so it is in state DISSOLVED + let now = env.pocket_ic.get_time().await.duration_since(UNIX_EPOCH).unwrap().as_secs(); + neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(now - 1)); + update_neuron(&agent, neuron.into()).await; + + match list_neurons(&agent).await.full_neurons[0].dissolve_state.unwrap() { + DissolveState::WhenDissolvedTimestampSeconds (d) => { + // The neuron should now be in DISSOLVED state + assert!(d panic!( + "Neuron should be in DissolveDelaySeconds state, but is instead: {:?}", + k + ), + } + + // Now we should be able to disburse the neuron + env.rosetta_client + .disburse_neuron( + env.network_identifier.clone(), + &(*TEST_IDENTITY).clone(), + RosettaDisburseNeuronArgs::builder(neuron_index) + .with_recipient(TEST_IDENTITY.sender().unwrap().into()) + .build(), + ) + .await + .unwrap(); + + // Wait for the ledger to sync up to the block where the disbursement happened + wait_for_rosetta_to_catch_up_with_icp_ledger( + &env.rosetta_client, + env.network_identifier.clone(), + &agent, + ) + .await; + + // The recipient should have received the disbursed amount + let balance_after_disburse = env + .rosetta_client + .account_balance( + AccountBalanceRequest::builder( + env.network_identifier.clone(), + AccountIdentifier::from(TEST_IDENTITY.sender().unwrap()).into(), + ) + .build(), + ) + .await + .unwrap() + .balances + .first() + .unwrap().clone() + .value.parse::().unwrap(); + // The balance should be the same as before the creation of the neuron minus the transfer fee + assert_eq!(balance_after_disburse, balance_before_disburse + staked_amount - DEFAULT_TRANSFER_FEE.get_e8s()); + }); +} diff --git a/rs/tests/Cargo.toml b/rs/tests/Cargo.toml index cc9e688fd06..0db2d67e31f 100644 --- a/rs/tests/Cargo.toml +++ b/rs/tests/Cargo.toml @@ -195,10 +195,6 @@ path = "financial_integrations/rosetta/rosetta_make_transactions_test.rs" name = "ic-systest-rosetta-network-test" path = "financial_integrations/rosetta/rosetta_network_test.rs" -[[bin]] -name = "ic-systest-rosetta-neuron-disburse-test" -path = "financial_integrations/rosetta/rosetta_neuron_disburse_test.rs" - [[bin]] name = "ic-systest-rosetta-neuron-dissolve-test" path = "financial_integrations/rosetta/rosetta_neuron_dissolve_test.rs" diff --git a/rs/tests/financial_integrations/rosetta/BUILD.bazel b/rs/tests/financial_integrations/rosetta/BUILD.bazel index e8792bcc3cb..8646f37569f 100644 --- a/rs/tests/financial_integrations/rosetta/BUILD.bazel +++ b/rs/tests/financial_integrations/rosetta/BUILD.bazel @@ -78,27 +78,6 @@ system_test_nns( deps = DEPENDENCIES + ["//rs/tests"], ) -system_test_nns( - name = "rosetta_neuron_disburse_test", - extra_head_nns_tags = [], # don't run the head_nns variant on nightly since it aleady runs on long_test. 
- flaky = True, - proc_macro_deps = MACRO_DEPENDENCIES, - tags = [ - "k8s", - "long_test", # since it takes longer than 5 minutes. - ], - target_compatible_with = ["@platforms//os:linux"], # requires libssh that does not build on Mac OS - runtime_deps = - GUESTOS_RUNTIME_DEPS + - UNIVERSAL_VM_RUNTIME_DEPS + [ - "//rs/rosetta-api/icp:ic-rosetta-api", - "//rs/rosetta-api/icp:rosetta_image.tar", - "//rs/tests:rosetta_workspace", - "@rosetta-cli//:rosetta-cli", - ], - deps = DEPENDENCIES + ["//rs/tests"], -) - system_test_nns( name = "rosetta_neuron_dissolve_test", flaky = True, diff --git a/rs/tests/financial_integrations/rosetta/rosetta_neuron_disburse_test.rs b/rs/tests/financial_integrations/rosetta/rosetta_neuron_disburse_test.rs deleted file mode 100644 index 4a738f442f8..00000000000 --- a/rs/tests/financial_integrations/rosetta/rosetta_neuron_disburse_test.rs +++ /dev/null @@ -1,21 +0,0 @@ -#[rustfmt::skip] -use anyhow::Result; - -use ic_system_test_driver::driver::group::SystemTestGroup; -use ic_system_test_driver::driver::test_env::TestEnv; -use ic_system_test_driver::systest; -use ic_tests::rosetta_tests; -use rosetta_tests::setup::{ROSETTA_TESTS_OVERALL_TIMEOUT, ROSETTA_TESTS_PER_TEST_TIMEOUT}; -use rosetta_tests::tests; - -fn main() -> Result<()> { - SystemTestGroup::new() - .with_setup(group_setup) - .with_overall_timeout(ROSETTA_TESTS_OVERALL_TIMEOUT) - .with_timeout_per_test(ROSETTA_TESTS_PER_TEST_TIMEOUT) - .add_test(systest!(tests::neuron_disburse::test)) - .execute_from_args()?; - Ok(()) -} - -fn group_setup(_env: TestEnv) {} diff --git a/rs/tests/src/rosetta_tests/tests.rs b/rs/tests/src/rosetta_tests/tests.rs index 4e6485e0135..58d4cea2c0b 100644 --- a/rs/tests/src/rosetta_tests/tests.rs +++ b/rs/tests/src/rosetta_tests/tests.rs @@ -3,7 +3,6 @@ pub mod list_known_neurons; pub mod list_neurons; pub mod make_transaction; pub mod network; -pub mod neuron_disburse; pub mod neuron_dissolve; pub mod neuron_follow; pub mod neuron_hotkey; diff --git a/rs/tests/src/rosetta_tests/tests/neuron_disburse.rs b/rs/tests/src/rosetta_tests/tests/neuron_disburse.rs deleted file mode 100644 index 4e97ad50bdb..00000000000 --- a/rs/tests/src/rosetta_tests/tests/neuron_disburse.rs +++ /dev/null @@ -1,300 +0,0 @@ -use crate::rosetta_tests::{ - ledger_client::LedgerClient, - lib::{ - check_balance, create_ledger_client, do_multiple_txn, make_user_ed25519, - one_day_from_now_nanos, sign_txn, to_public_key, NeuronDetails, - }, - rosetta_client::RosettaApiClient, - setup::setup, - test_neurons::TestNeurons, -}; -use ic_ledger_core::{ - tokens::{CheckedAdd, CheckedSub}, - Tokens, -}; -use ic_nns_governance_api::pb::v1::{neuron::DissolveState, Neuron}; -use ic_rosetta_api::{ - models::{ConstructionPayloadsResponse, SignedTransaction}, - request::{ - request_result::RequestResult, transaction_operation_results::TransactionOperationResults, - Request, - }, - request_types::{Disburse, Status}, -}; -use ic_rosetta_test_utils::RequestInfo; -use ic_system_test_driver::{driver::test_env::TestEnv, util::block_on}; -use icp_ledger::{AccountIdentifier, DEFAULT_TRANSFER_FEE}; -use rosetta_core::objects::ObjectMap; -use serde_json::json; -use slog::Logger; -use std::{collections::HashMap, str::FromStr, sync::Arc, time::UNIX_EPOCH}; - -const PORT: u32 = 8104; -const VM_NAME: &str = "neuron-disburse"; - -pub fn test(env: TestEnv) { - let logger = env.logger(); - - let mut ledger_balances = HashMap::new(); - - // Create neurons. 
- let neuron_setup = |neuron: &mut Neuron| { - neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds(0)) - }; - - let one_year_from_now = 60 * 60 * 24 * 365 - + std::time::SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(); - - let mut neurons = TestNeurons::new(2000, &mut ledger_balances); - - let neuron1 = neurons.create(neuron_setup); - let neuron2 = neurons.create(neuron_setup); - let neuron3 = neurons.create(neuron_setup); - let neuron4 = neurons.create(|neuron| { - neuron.dissolve_state = Some(DissolveState::WhenDissolvedTimestampSeconds( - one_year_from_now, - )) - }); - let neuron5 = neurons.create(neuron_setup); - let neuron6 = neurons.create(neuron_setup); - let neuron7 = neurons.create(neuron_setup); - - // Create Rosetta and ledger clients. - let neurons = neurons.get_neurons(); - let client = setup(&env, PORT, VM_NAME, Some(ledger_balances), Some(neurons)); - let ledger_client = create_ledger_client(&env, &client); - - block_on(async { - test_disburse_raw(&client, &ledger_client, &neuron1, None, None, &logger) - .await - .expect("Failed test raw disburse"); - - test_disburse(&client, &ledger_client, &neuron2, None, None) - .await - .expect("Failed test disburse"); - - // Disburse to custom recipient. - let (recipient, _, _, _) = make_user_ed25519(102); - test_disburse(&client, &ledger_client, &neuron3, None, Some(recipient)) - .await - .expect("Failed test disburse to custom recipient"); - - // Disburse before neuron is dissolved (fail expected). - test_disburse(&client, &ledger_client, &neuron4, None, Some(recipient)) - .await - .unwrap_err(); - - // Disburse an amount. - test_disburse( - &client, - &ledger_client, - &neuron5, - Some(Tokens::new(5, 0).unwrap()), - None, - ) - .await - .expect("Failed test disburse an amount"); - - // Disburse full stake. - test_disburse( - &client, - &ledger_client, - &neuron6, - Some(Tokens::new(10, 0).unwrap()), - None, - ) - .await - .expect("Failed test disburse full stake"); - - // Disburse more than staked amount. - test_disburse( - &client, - &ledger_client, - &neuron7, - Some(Tokens::new(11, 0).unwrap()), - None, - ) - .await - .unwrap_err() - }); -} - -#[allow(clippy::too_many_arguments)] -async fn test_disburse( - ros: &RosettaApiClient, - ledger_client: &LedgerClient, - neuron_info: &NeuronDetails, - amount: Option, - recipient: Option, -) -> Result<(), ic_rosetta_api::models::Error> { - let neuron = &neuron_info.neuron; - let acc = neuron_info.account_id; - let key_pair = Arc::new(neuron_info.key_pair.clone()); - let neuron_index = neuron_info.neuron_subaccount_identifier; - - let pre_disburse = ledger_client.get_account_balance(acc).await; - let (_, tip_idx) = ledger_client.get_tip().await; - - let res = do_multiple_txn( - ros, - &[RequestInfo { - request: Request::Disburse(Disburse { - account: acc, - amount, - recipient, - neuron_index, - }), - sender_keypair: Arc::clone(&key_pair), - }], - false, - Some(one_day_from_now_nanos()), - None, - ) - .await - .map(|(tx_id, results, _)| { - assert!(!tx_id.is_transfer()); - assert!(matches!( - results.operations.first().unwrap(), - RequestResult { - _type: Request::Disburse(_), - status: Status::Completed, - .. 
- } - )); - results - })?; - - let amount = amount.unwrap_or_else(|| Tokens::from_e8s(neuron.cached_neuron_stake_e8s)); - - let expected_idx = tip_idx + 1; - - if let Some(h) = res.last_block_index() { - assert_eq!(h, expected_idx); - } - let _ = ros.wait_for_block_at(expected_idx).await.unwrap(); - - // governance assumes the default fee for disburse and that's why this check uses the - // DEFAULT_TRANSFER_FEE. - check_balance( - ros, - ledger_client, - &recipient.unwrap_or(acc), - pre_disburse - .checked_add(&amount) - .unwrap() - .checked_sub(&DEFAULT_TRANSFER_FEE) - .unwrap(), - ) - .await; - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -async fn test_disburse_raw( - ros: &RosettaApiClient, - ledger_client: &LedgerClient, - neuron_info: &NeuronDetails, - amount: Option, - recipient: Option, - _logger: &Logger, -) -> Result<(), ic_rosetta_api::models::Error> { - let neuron = &neuron_info.neuron; - let acc = neuron_info.account_id; - let key_pair = Arc::new(neuron_info.key_pair.clone()); - let neuron_index = neuron_info.neuron_subaccount_identifier; - - let pre_disburse = ledger_client.get_account_balance(acc).await; - let (_, tip_idx) = ledger_client.get_tip().await; - let req = json!({ - "network_identifier": &ros.network_id(), - "operations": [ - { - "operation_identifier": { - "index": 0 - }, - "type": "DISBURSE", - "account": { - "address": &acc - }, - "metadata": { - "neuron_index": &neuron_index - } - } - ] - }); - let req = req.to_string(); - - let metadata: ObjectMap = serde_json::from_slice( - &ros.raw_construction_endpoint("metadata", req.as_bytes()) - .await - .unwrap() - .0, - ) - .unwrap(); - - let mut req: ObjectMap = serde_json::from_str(&req).unwrap(); - req.insert("metadata".to_string(), metadata.into()); - req.insert( - "public_keys".to_string(), - serde_json::to_value(vec![to_public_key(&key_pair)]).unwrap(), - ); - - let payloads: ConstructionPayloadsResponse = serde_json::from_slice( - &ros.raw_construction_endpoint("payloads", &serde_json::to_vec_pretty(&req).unwrap()) - .await - .unwrap() - .0, - ) - .unwrap(); - - let signed = sign_txn(ros, &[key_pair.clone()], payloads).await.unwrap(); - - let hash_res = ros - .construction_hash(signed.signed_transaction.clone()) - .await - .unwrap()?; - - let submit_res = ros - .construction_submit(SignedTransaction::from_str(&signed.signed_transaction).unwrap()) - .await - .unwrap()?; - - assert_eq!( - hash_res.transaction_identifier, - submit_res.transaction_identifier - ); - - for op in TransactionOperationResults::try_from(submit_res.metadata) - .unwrap() - .operations - .iter() - { - assert_eq!( - op.status.as_ref().expect("Expecting status to be set."), - "COMPLETED", - "Operation didn't complete." - ); - } - - let amount = amount.unwrap_or_else(|| Tokens::from_e8s(neuron.cached_neuron_stake_e8s)); - let expected_idx = tip_idx + 1; - let _ = ros.wait_for_block_at(expected_idx).await.unwrap(); - - // governance assumes the default fee for disburse and that's why this check uses the - // DEFAULT_TRANSFER_FEE. 
- check_balance( - ros, - ledger_client, - &recipient.unwrap_or(acc), - pre_disburse - .checked_add(&amount) - .unwrap() - .checked_sub(&DEFAULT_TRANSFER_FEE) - .unwrap(), - ) - .await; - Ok(()) -} From e3c408cd0dd5cc347f64515a54e451efc0dadefb Mon Sep 17 00:00:00 2001 From: kpop-dfinity <125868903+kpop-dfinity@users.noreply.github.com> Date: Thu, 24 Oct 2024 20:23:44 +0200 Subject: [PATCH 4/4] feat(consensus): push all ingress messages (#2233) Currently, we push ingress messages only if the size of the message is below 1024 bytes (see [link](https://sourcegraph.com/github.com/dfinity/ic/-/blob/rs/p2p/consensus_manager/src/sender.rs?L232-234)), meaning that for large enough ingress messages, a node first has to see an advert and then request to download the advertized ingress message from a peer, before it can include the message in a block, potentially adding a couple hundred milliseconds to the end-to-end ingress message latency. The benefits are even bigger in the context of the hashes-in-blocks feature, where we rely on the existence of ingress messages in the pool when validating blocks received from peers - pushing ingress messages should increase the chances that all the ingress messages referenced by a "stripped block" are already in the ingress pool, so we don't have to fetch them from peers. One downside of always pushing ingress messages could be wasted bandwidth, i.e. a node might receive an ingress message from a peer and then immediately discard it because the [bouncer function](https://sourcegraph.com/github.com/dfinity/ic/-/blob/rs/ingress_manager/src/bouncer.rs?L32-40) deemed the ingress message to be not needed (e.g. if the ingress message already expired, from the node's point of view), but this should be a rare occurrence (we actively purge expired ingress messages). --- rs/artifact_pool/src/ingress_pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rs/artifact_pool/src/ingress_pool.rs b/rs/artifact_pool/src/ingress_pool.rs index c5c42f8e715..542894e7b47 100644 --- a/rs/artifact_pool/src/ingress_pool.rs +++ b/rs/artifact_pool/src/ingress_pool.rs @@ -286,7 +286,7 @@ impl MutablePool for IngressPoolImpl { if unvalidated_artifact.peer_id == self.node_id { transmits.push(ArtifactTransmit::Deliver(ArtifactWithOpt { artifact: unvalidated_artifact.message.signed_ingress.clone(), - is_latency_sensitive: false, + is_latency_sensitive: true, })); } self.validated.insert(