From a9f900306fb32aa687e63a1c44b3beffb173d4ca Mon Sep 17 00:00:00 2001 From: Ryan Tan Date: Wed, 29 May 2024 15:11:33 +0100 Subject: [PATCH 01/11] Baking nonces: Improve warning to bakers when baking nonces cannot be migrated --- .../lib_delegate/baking_events.ml | 17 ++++++++++++----- .../lib_delegate/baking_nonces.ml | 14 ++++++++++++-- .../lib_delegate/baking_events.ml | 17 ++++++++++++----- .../lib_delegate/baking_nonces.ml | 14 ++++++++++++-- src/proto_alpha/lib_delegate/baking_events.ml | 17 ++++++++++++----- src/proto_alpha/lib_delegate/baking_nonces.ml | 14 ++++++++++++-- 6 files changed, 72 insertions(+), 21 deletions(-) diff --git a/src/proto_018_Proxford/lib_delegate/baking_events.ml b/src/proto_018_Proxford/lib_delegate/baking_events.ml index 3c2f4e46fff8..e5cca175019d 100644 --- a/src/proto_018_Proxford/lib_delegate/baking_events.ml +++ b/src/proto_018_Proxford/lib_delegate/baking_events.ml @@ -1059,17 +1059,24 @@ module Nonces = struct () let ignore_failed_nonce_migration = - declare_1 + declare_3 ~section ~name:"ignore_failed_nonce_migration" ~level:Warning ~msg: - "There is not enough block history to complete the migration. Try \ starting from an older snapshot or providing more block history. The \ nonces from the following blocks will not be migrated:\n\ {failed} " + "Found orphaned nonces while migrating baking nonces to the new file \ format. Please review the list of associated blocks. If the block is \ older than the last cycle or if it was not included, the file at \ '{legacy_nonces_file}' and '{orphaned_nonces_file}' should be archived \ and then removed. If the block is in the current or last cycle, you \ must start from a snapshot that is old enough to bootstrap those \ blocks to avoid losing some of your baking rewards. 
Blocks associated \ + with orphaned nonces:\n\ + {failed}" ~pp1:(Format.pp_print_list Block_hash.pp) ("failed", Data_encoding.list Block_hash.encoding) + ("legacy_nonces_file", Data_encoding.string) + ("orphaned_nonces_file", Data_encoding.string) let outdated_nonce = declare_1 diff --git a/src/proto_018_Proxford/lib_delegate/baking_nonces.ml b/src/proto_018_Proxford/lib_delegate/baking_nonces.ml index 3572b0030027..314350893aad 100644 --- a/src/proto_018_Proxford/lib_delegate/baking_nonces.ml +++ b/src/proto_018_Proxford/lib_delegate/baking_nonces.ml @@ -201,9 +201,9 @@ let try_migrate_legacy_nonces state = } = state in + let legacy_location = Baking_files.filename legacy_location in let migrate () = let open Lwt_result_syntax in - let legacy_location = Baking_files.filename legacy_location in let* legacy_nonces = cctxt#load legacy_location ~default:legacy_empty legacy_encoding in @@ -291,7 +291,17 @@ let try_migrate_legacy_nonces state = failed_migration [] in - Events.(emit ignore_failed_nonce_migration failed_block_hashes) + let legacy_filename = + Filename.concat cctxt#get_base_dir legacy_location + in + let orphaned_location = Baking_files.filename orphaned_location in + let orphaned_filename = + Filename.concat cctxt#get_base_dir orphaned_location + in + Events.( + emit + ignore_failed_nonce_migration + (failed_block_hashes, legacy_filename, orphaned_filename)) | Error _ -> return_unit (** [partition_unrevealed_nonces state nonces current_cycle current_level] partitions diff --git a/src/proto_019_PtParisB/lib_delegate/baking_events.ml b/src/proto_019_PtParisB/lib_delegate/baking_events.ml index 500d70db30dd..cb3fdedb9894 100644 --- a/src/proto_019_PtParisB/lib_delegate/baking_events.ml +++ b/src/proto_019_PtParisB/lib_delegate/baking_events.ml @@ -1046,17 +1046,24 @@ module Nonces = struct () let ignore_failed_nonce_migration = - declare_1 + declare_3 ~section ~name:"ignore_failed_nonce_migration" ~level:Warning ~msg: - "There is not enough block history to 
complete the migration. Try \ - starting from an older snapshot or providing more block history. The \ - nonces from the following blocks will not be migrated:\n\ - {failed} " + "Found orphaned nonces while migrating baking nonces to the new file \ + format. Please review the list of associated blocks. If the block is \ + older than the last cycle or if it was not included, the file at \ + '{legacy_nonces_file}' and '{orphaned_nonces_file}'should be archived \ + and then removed. If the block is in the current or last cycle, you \ + must start from a snapshot that is old enough to boostrap those \ + blocks to avoid losing some of your baking rewards. Blocks associated \ + with orphaned nonces:\n\ + {failed}" ~pp1:(Format.pp_print_list Block_hash.pp) ("failed", Data_encoding.list Block_hash.encoding) + ("legacy_nonces_file", Data_encoding.string) + ("orphaned_nonces_file", Data_encoding.string) let outdated_nonce = declare_1 diff --git a/src/proto_019_PtParisB/lib_delegate/baking_nonces.ml b/src/proto_019_PtParisB/lib_delegate/baking_nonces.ml index 6fd910f17f57..7d38c52f8db9 100644 --- a/src/proto_019_PtParisB/lib_delegate/baking_nonces.ml +++ b/src/proto_019_PtParisB/lib_delegate/baking_nonces.ml @@ -201,9 +201,9 @@ let try_migrate_legacy_nonces state = } = state in + let legacy_location = Baking_files.filename legacy_location in let migrate () = let open Lwt_result_syntax in - let legacy_location = Baking_files.filename legacy_location in let* legacy_nonces = cctxt#load legacy_location ~default:legacy_empty legacy_encoding in @@ -291,7 +291,17 @@ let try_migrate_legacy_nonces state = failed_migration [] in - Events.(emit ignore_failed_nonce_migration failed_block_hashes) + let legacy_filename = + Filename.concat cctxt#get_base_dir legacy_location + in + let orphaned_location = Baking_files.filename orphaned_location in + let orphaned_filename = + Filename.concat cctxt#get_base_dir orphaned_location + in + Events.( + emit + ignore_failed_nonce_migration + 
(failed_block_hashes, legacy_filename, orphaned_filename)) | Error _ -> return_unit (** [partition_unrevealed_nonces state nonces current_cycle current_level] partitions diff --git a/src/proto_alpha/lib_delegate/baking_events.ml b/src/proto_alpha/lib_delegate/baking_events.ml index 500d70db30dd..cb3fdedb9894 100644 --- a/src/proto_alpha/lib_delegate/baking_events.ml +++ b/src/proto_alpha/lib_delegate/baking_events.ml @@ -1046,17 +1046,24 @@ module Nonces = struct () let ignore_failed_nonce_migration = - declare_1 + declare_3 ~section ~name:"ignore_failed_nonce_migration" ~level:Warning ~msg: - "There is not enough block history to complete the migration. Try \ starting from an older snapshot or providing more block history. The \ nonces from the following blocks will not be migrated:\n\ {failed} " + "Found orphaned nonces while migrating baking nonces to the new file \ format. Please review the list of associated blocks. If the block is \ older than the last cycle or if it was not included, the file at \ '{legacy_nonces_file}' and '{orphaned_nonces_file}' should be archived \ and then removed. If the block is in the current or last cycle, you \ must start from a snapshot that is old enough to bootstrap those \ blocks to avoid losing some of your baking rewards. 
Blocks associated \ + with orphaned nonces:\n\ + {failed}" ~pp1:(Format.pp_print_list Block_hash.pp) ("failed", Data_encoding.list Block_hash.encoding) + ("legacy_nonces_file", Data_encoding.string) + ("orphaned_nonces_file", Data_encoding.string) let outdated_nonce = declare_1 diff --git a/src/proto_alpha/lib_delegate/baking_nonces.ml b/src/proto_alpha/lib_delegate/baking_nonces.ml index 6fd910f17f57..7d38c52f8db9 100644 --- a/src/proto_alpha/lib_delegate/baking_nonces.ml +++ b/src/proto_alpha/lib_delegate/baking_nonces.ml @@ -201,9 +201,9 @@ let try_migrate_legacy_nonces state = } = state in + let legacy_location = Baking_files.filename legacy_location in let migrate () = let open Lwt_result_syntax in - let legacy_location = Baking_files.filename legacy_location in let* legacy_nonces = cctxt#load legacy_location ~default:legacy_empty legacy_encoding in @@ -291,7 +291,17 @@ let try_migrate_legacy_nonces state = failed_migration [] in - Events.(emit ignore_failed_nonce_migration failed_block_hashes) + let legacy_filename = + Filename.concat cctxt#get_base_dir legacy_location + in + let orphaned_location = Baking_files.filename orphaned_location in + let orphaned_filename = + Filename.concat cctxt#get_base_dir orphaned_location + in + Events.( + emit + ignore_failed_nonce_migration + (failed_block_hashes, legacy_filename, orphaned_filename)) | Error _ -> return_unit (** [partition_unrevealed_nonces state nonces current_cycle current_level] partitions -- GitLab From f9ab68860fcf76174f285dff3eee44fd8d9a0296 Mon Sep 17 00:00:00 2001 From: Ryan Tan Date: Wed, 5 Jun 2024 16:02:14 +0100 Subject: [PATCH 02/11] Baking nonces: Tezt to show warning is removed when orphaned nonces do not exist --- tezt/tests/nonce_seed_revelation.ml | 114 +++++++++++++++++++++++++++- 1 file changed, 113 insertions(+), 1 deletion(-) diff --git a/tezt/tests/nonce_seed_revelation.ml b/tezt/tests/nonce_seed_revelation.ml index c5794250db47..0a310325f67d 100644 --- 
a/tezt/tests/nonce_seed_revelation.ml +++ b/tezt/tests/nonce_seed_revelation.ml @@ -230,4 +230,116 @@ let test_nonce_seed_revelation = hashes (%R)") ; unit -let register ~protocols = test_nonce_seed_revelation protocols +let test_baking_nonce_migration = + Protocol.register_test + ~__FILE__ + ~title:"Baking nonce format migration" + ~tags:[team; "nonce"; "migration"] + ~uses:(fun protocol -> [Protocol.baker protocol]) + @@ fun protocol -> + Log.info "Initialize node and client" ; + let* node, client = + Client.init_with_node ~nodes_args:[Synchronisation_threshold 0] `Client () + in + let* () = Node.wait_for_ready node in + + Log.info "Initialize baker with all bootstrap keys" ; + let delegates = + Array.to_list + @@ Array.map (fun key -> Account.(key.alias)) Account.Bootstrap.keys + in + let baker = Baker.create ~protocol ~delegates node client in + + (* Reduce the block per cycle to gain some time *) + let blocks_per_cycle = 4 in + + (* Target level is 2 cycles *) + let target_level = first_protocol_block + (blocks_per_cycle * 2) in + let target_level_promise = Node.wait_for_level node target_level in + + Log.info "Activate protocol %s" (Protocol.name protocol) ; + let* parameter_file = + Protocol.write_parameter_file + ~base:(Right (protocol, None)) + [ + (["blocks_per_cycle"], `Int blocks_per_cycle); + (["nonce_revelation_threshold"], `Int 2); + ] + in + let* () = + Client.activate_protocol_and_wait + ~timestamp:Now + ~parameter_file + ~protocol + client + in + + Log.info "Start the baker and wait for success migration nonces baker event" ; + let successful_migration_event = + Baker.wait_for baker "success_migrate_nonces.v0" Option.some + in + let* () = Baker.run baker in + + let* _ = successful_migration_event in + + Log.info + "Bake until the level: %d (end of the second cycle) then kill the baker" + target_level ; + let* _ = target_level_promise in + let* () = Baker.kill baker in + + Log.info "Retrieve the nonce file contents" ; + let* chain_id = 
Client.RPC.call client @@ RPC.get_chain_chain_id () in + let convert_to_b58_short b58_long = + Tezos_crypto.Hashed.Chain_id.(of_b58check_exn b58_long |> to_short_b58check) + in + let nonces_file = + Filename.concat + (Client.base_dir client) + (convert_to_b58_short chain_id ^ "_nonces") + in + let old_nonces_contents = Base.read_file nonces_file in + + let target_level = (target_level * 2) + blocks_per_cycle in + let target_level_promise = Node.wait_for_level node target_level in + + Log.info + "Restart the baker until level: %d (end of the fifth cycle) then kill the \ + baker" + target_level ; + let* () = Baker.run baker in + let* _ = target_level_promise in + let* () = Baker.kill baker in + + Log.info "Concat old nonces contents with the new one" ; + let new_nonces_contents = Base.read_file nonces_file in + let () = + Base.write_file + nonces_file + ~contents:(old_nonces_contents ^ new_nonces_contents) + in + + Log.info + "Restart the baker and wait for ignore failed nonce migration event then \ + kill the baker" ; + let failed_migration_event = + Baker.wait_for baker "ignore_failed_nonce_migration.v0" Option.some + in + let* () = Baker.run baker in + let* _ = failed_migration_event in + let* () = Baker.kill baker in + + Log.info "Remove old nonces contents from nonces file" ; + let () = Base.write_file nonces_file ~contents:new_nonces_contents in + + Log.info "Restart the baker and wait for success migrate nonces event" ; + let successful_migration_event = + Baker.wait_for baker "success_migrate_nonces.v0" Option.some + in + let* () = Baker.run baker in + let* _ = successful_migration_event in + unit + +let register ~protocols = + test_nonce_seed_revelation protocols ; + test_baking_nonce_migration protocols -- GitLab From b30f77d3e282543bf3de84f5dc8107f210f050ba Mon Sep 17 00:00:00 2001 From: Victor Dumitrescu Date: Thu, 30 May 2024 12:11:31 +0200 Subject: [PATCH 03/11] RISC-V PVM: set initial state hash to be the same as in the protocol --- 
.../lib_protocol/sc_rollup_riscv.ml | 9 ++- src/riscv/lib/src/ocaml_api.rs | 2 +- src/riscv/lib/src/pvm/dummy_pvm.rs | 79 +++++++++++++------ 3 files changed, 64 insertions(+), 26 deletions(-) diff --git a/src/proto_alpha/lib_protocol/sc_rollup_riscv.ml b/src/proto_alpha/lib_protocol/sc_rollup_riscv.ml index 14490b1b993d..70d49703d8fa 100644 --- a/src/proto_alpha/lib_protocol/sc_rollup_riscv.ml +++ b/src/proto_alpha/lib_protocol/sc_rollup_riscv.ml @@ -41,9 +41,12 @@ let minimal_state_encoding = let make_empty_state () = {payload = ""; level = None; message_counter = Z.zero; tick = Z.zero} -let state_hash state = - [Data_encoding.Binary.to_bytes_exn minimal_state_encoding state] - |> Context_hash.hash_bytes |> State_hash.context_hash_to_state_hash +let state_hash _state = + (* In order to synchronise with the node implementation of the PVM at genesis, + * we set the state hash to be the initial state hash of the node + * implementation. *) + State_hash.of_b58check_exn + "srs125KWe9pR1PK3KMeiRc19gmh54Ywc3pm1PMTJntAMpguPvPr6mX" module type S = sig include PS.S diff --git a/src/riscv/lib/src/ocaml_api.rs b/src/riscv/lib/src/ocaml_api.rs index 7a8111cfb0b6..13971107b9d2 100644 --- a/src/riscv/lib/src/ocaml_api.rs +++ b/src/riscv/lib/src/ocaml_api.rs @@ -148,7 +148,7 @@ pub fn octez_riscv_install_boot_sector( #[ocaml::func] #[ocaml::sig("state -> bytes")] pub fn octez_riscv_state_hash(state: Pointer) -> [u8; 32] { - state.as_ref().0.hash().try_into().unwrap() + state.as_ref().0.hash() } #[ocaml::func] diff --git a/src/riscv/lib/src/pvm/dummy_pvm.rs b/src/riscv/lib/src/pvm/dummy_pvm.rs index 710c25bea0f3..3c1bd57a4c31 100644 --- a/src/riscv/lib/src/pvm/dummy_pvm.rs +++ b/src/riscv/lib/src/pvm/dummy_pvm.rs @@ -2,6 +2,7 @@ // // SPDX-License-Identifier: MIT +use crate::storage::Hash; use bincode::serialize; use serde::{Deserialize, Serialize}; use std::fmt; @@ -9,18 +10,47 @@ use std::fmt; const DUMMY_STATUS: &str = "riscv_dummy_status"; #[derive(Debug, Serialize, 
Deserialize, Default, PartialEq)] -pub struct DummyPvm { +pub struct State { payload: Vec, level: Option, message_counter: u64, tick: u64, } +impl State { + fn empty() -> &'static Self { + const EMPTY_STATE: &State = &State { + payload: Vec::new(), + level: None, + message_counter: 0, + tick: 0, + }; + EMPTY_STATE + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +pub enum DummyPvm { + // The purpose of this variant is for the encoding (hence, the hash) + // of an empty PVM state to remain constant if the `State` type changes. + // This hash is hardcoded in the protocol implementation of the dummy PVM + // in src/proto_alpha/lib_protocol/sc_rollup_riscv.ml. + Empty, + Pvm(State), +} + pub struct Status(String); impl DummyPvm { pub fn empty() -> Self { - Self::default() + Self::Empty + } + + fn state(&self) -> &State { + match self { + Self::Empty => State::empty(), + Self::Pvm(state) => state, + } } pub fn get_status(&self) -> Status { @@ -28,51 +58,56 @@ impl DummyPvm { pub fn get_tick(&self) -> u64 { - self.tick + self.state().tick } pub fn get_current_level(&self) -> Option { - self.level + self.state().level } pub fn get_message_counter(&self) -> u64 { - self.message_counter + self.state().message_counter } pub fn install_boot_sector(&self, boot_sector: Vec) -> Self { - DummyPvm { + let state = self.state(); + Self::Pvm(State { payload: boot_sector, - level: self.level, - message_counter: self.message_counter, - tick: self.tick, - } + level: state.level, + message_counter: state.message_counter, + tick: state.tick, + }) } pub fn compute_step(&self) -> Self { - DummyPvm { - payload: self.payload.clone(), - level: self.level, - message_counter: self.message_counter, - tick: self.tick + 1, - } + let state = self.state(); + Self::Pvm(State { + payload: state.payload.clone(), + level: state.level, + message_counter: state.message_counter, + tick: state.tick + 1, + }) } pub fn compute_step_many(&self, _max_steps: usize) -> (Self, i64) { - 
(Self::empty(), 0) + (Self::Empty, 0) } - pub fn hash(&self) -> Vec { + pub fn hash(&self) -> Hash { let bytes = serialize(&self).unwrap(); - tezos_crypto_rs::blake2b::digest_256(&bytes).unwrap() + tezos_crypto_rs::blake2b::digest_256(&bytes) + .unwrap() + .try_into() + .unwrap() } pub fn set_input(&self, level: u32, message_counter: u64, input: Vec) -> Self { - DummyPvm { + Self::Pvm(State { payload: input, level: Some(level), message_counter, - tick: self.tick + 1, - } + tick: self.get_tick() + 1, + }) } } -- GitLab From 48360d525a18722dc3abfb06c07a7105fd3fc1f9 Mon Sep 17 00:00:00 2001 From: Victor Dumitrescu Date: Thu, 30 May 2024 12:12:45 +0200 Subject: [PATCH 04/11] RISC-V PVM: add/enable basic tezt tests --- ...cv - RPC API should work and be stable.out | 255 ++++++++++ ...ces PVM state with messages (external).out | 445 ++++++++++++++++++ ...ces PVM state with messages (internal).out | 195 ++++++++ ...tion of a SCORU executes without error.out | 35 ++ tezt/tests/sc_rollup.ml | 26 +- 5 files changed, 944 insertions(+), 12 deletions(-) create mode 100644 tezt/tests/expected/sc_rollup.ml/Alpha- riscv - RPC API should work and be stable.out create mode 100644 tezt/tests/expected/sc_rollup.ml/Alpha- riscv - node advances PVM state with messages (external).out create mode 100644 tezt/tests/expected/sc_rollup.ml/Alpha- riscv - node advances PVM state with messages (internal).out create mode 100644 tezt/tests/expected/sc_rollup.ml/Alpha- riscv - origination of a SCORU executes without error.out diff --git a/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - RPC API should work and be stable.out b/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - RPC API should work and be stable.out new file mode 100644 index 000000000000..79d208c01922 --- /dev/null +++ b/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - RPC API should work and be stable.out @@ -0,0 +1,255 @@ + +./octez-client --wait none originate smart rollup rollup from bootstrap1 of kind riscv of type string with 
kernel --burn-cap 9999999 +Node is bootstrapped. +Estimated gas: 1930.030 units (will add 100 for safety) +Estimated storage: 6552 bytes added (will add 20 for safety) +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000441 + Expected counter: 1 + Gas limit: 2031 + Storage limit: 6572 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000441 + payload fees(the block proposer) ....... +ꜩ0.000441 + Smart rollup origination: + Kind: riscv + Parameter type: string + Kernel Blake2B hash: '0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8' + This smart rollup origination was successfully applied + Consumed gas: 1929.997 + Storage size: 6552 bytes + Address: [SMART_ROLLUP_HASH] + Genesis commitment hash: [SC_ROLLUP_COMMITMENT_HASH] + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ1.638 + storage fees ........................... 
+ꜩ1.638 + +Smart rollup [SMART_ROLLUP_HASH] memorized as "rollup" +GET http://[HOST]:[PORT]/global/smart_rollup_address +200 OK +"[SMART_ROLLUP_HASH]" + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "312d31", + "312d32", + "312d33", + "312d34", + "312d35" +] +200 OK +["scmsg2rwuPwGBrYsQHFuusVq5DAZFfJ4iGcAJ6Ttvyobd6b1V1oSpUk","scmsg3WtaZfwmQQzbHobfzcZw3JFy7eYAAJs9RZsGGrnzwEAnzPP5We","scmsg2VDC22BtmBrJD2vGKdjeNBMv7W6uw1uRVvJmW7jRv2z9yiQTcM","scmsg25K99LfhZYmLRgiyFGPswB3YiNoH6Lw97AuPeZEaC1cKpdNaiN","scmsg3j2jViLpky6CbTuQWUguz3HxuvUL3XZYppP8euHHuWeDdW6QZy"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "322d31", + "322d32", + "322d33", + "322d34", + "322d35" +] +200 OK +["scmsg2Q8u9Ufd95zmF2DmjyhgaHyDNud45t5MXWh4BTdnnVabCT7hFc","scmsg3eoXpnbxkuUcg5DPWLxbLxRaewCjpLnB3GUoT8fa3NdEV1zTj1","scmsg2CcNmHmfTshLkupfyLMsDRQC5bWEVUYJKAY4hWD1QkjeBzqQuU","scmsg2bwoquA7WfWQCmvEpmWV2HNJw6pXX8gojemMhNTdMJuaQvovwt","scmsg2jUikwFvAacRFXSsFiohz8Ac1FzFxUMMDjj2FRn4wwbEue9GE1"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "332d31", + "332d32", + "332d33", + "332d34", + "332d35" +] +200 OK +["scmsg3dpi4Ju7FjurV5ygFfchk7AzEs4fkQwP5osyEC7KPwmpShKQsd","scmsg3U7d1dZ6fZnGSkcQaSgQGmCA59fM9HdfaAxn6iUyhMjJ7tzPvg","scmsg2ybFzC2zNxSiU3JGDQXpUFBaMSyruCrTNxncJGaAJWpKGoSwVD","scmsg3JAvYLWsukJD9qdzj71rMsUSVJiM1sgMfRtu9CYmJWL3unEmdw","scmsg2KGintqwnFJ2DpEqPzQ2wwKrqE4aWXePHaac6Wk4dZ2zYDzxq4"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "342d31", + "342d32", + "342d33", + "342d34", + "342d35" +] +200 OK +["scmsg3ig6Tfoiqy7J4pvCA69Xq5YGYwazYFwsiUYwjdKKBmVEiM7Bra","scmsg3DuDpF1odPhoS6mVEYT1FFyrmUJWEdqqAL768KE3rV3yWBwEMo","scmsg2M9XdPeq2rrhH9wCVw9EuaUmhvwdkSHMUfBbCKx3TBoV2j8Zzk","scmsg3BwQX5BJdaTdYf9UcqV3G6CwyxJjY5YaTTwSZKmZJi35GCTRk7","scmsg3ZLrgvvQaWKJgHB3nyQPoR5n7HDHbGp6nX2SeB1So3Fv1zVqdR"] + +POST 
http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "352d31", + "352d32", + "352d33", + "352d34", + "352d35" +] +200 OK +["scmsg32tLLi3b6P9nffRaxRYevU9nFKTPLRcdGGDGMuBYxoNp49h9fU","scmsg2WrjiWD88fiebhAnoZokDocz9XF2MqPp15hpJT5xGKG7jvBnph","scmsg3TWeiN93kzSuDei1T7PiBoxjVUxdqvMnDZ1x1bW7deVakpCBaH","scmsg238Qn5sQPP83DUtBamFgXEy3M74Zb23Cd2AuaQNiKWrhhoCMRZ","scmsg2fcex43ebcuDHXPaTFn4gaPsBoUtcZLZ4LatPvEppoxX8USaKb"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "362d31", + "362d32", + "362d33", + "362d34", + "362d35" +] +200 OK +["scmsg33ufVokVubNNkPvhVC2xVA6YQjePMV9KYHk3suh4aUmZLkxDUJ","scmsg2Veu8WVquo3WghLBoabaFsxJvQmrgRSR3J94RDEp18oecM9jwE","scmsg3UB9prgoMs2tgEE9avT1FMTiUUT6Ub67fHdFsCGFqs3XXTUraS","scmsg2ppDMPFbXGEhnkASETkvG4AdmigQzPjDPZPmAU63nJrXtBG9FD","scmsg3BonQUZ9SLxwfDKKefrJWnkAF4A9cVmaDAKSxnq2dBoX7vmXRp"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "372d31", + "372d32", + "372d33", + "372d34", + "372d35" +] +200 OK +["scmsg3b7y5ysxfjzRiSQHWygRQ37bf8fVtum2rfk7YuXLQQzyt9bsU5","scmsg3NrtDG5ynFbdEXewroyCm1rcFNHYRWQvBj3ooG1kg1JWB5gSUd","scmsg3CS5J6TrAvHKZ31PLBmLtzciH5xH8rRLsR8tY6kmm7TdkHFbKk","scmsg3i6pSyMeUewYGSEbPX2skk552iKmiq9RPsHCeG8WMJfBGMenXy","scmsg3GbNbAxSSFpnYneMLBRnL4h6byyJpkTCpc5GwzyAEtS2xxpSCB"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "382d31", + "382d32", + "382d33", + "382d34", + "382d35" +] +200 OK +["scmsg2jSC9soMxLqf9EkgtoZioZbg9q25LBeBKmkwsmGD2a8E9FaZtx","scmsg3tcqypPLNPYVC6Rtx62jZyT6Tqmua6JeSvfqLMg4ENmC78R6VT","scmsg2y9Lzxk9vmPzdijxtV1cisN9GTfWfZYfsShGiaTNqUc15xqGfm","scmsg2pHbd5gcWS41nzKcfhtzQXaC7T1PkZpEqmGrY6LCjBLJzkqZQ8","scmsg3nJkzbFRfd6EQnw3QhyDC3W42WSiJ9t2iqZ46k1Ji6tuCxjSuN"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "392d31", + "392d32", + "392d33", + "392d34", + "392d35" +] +200 OK 
+["scmsg3RKmNvZggo7jsdgM71HebGFG3TfY2gh3Z1t4FNPqYReqBHHnxr","scmsg34ExXXaeUBYvf6hRTGAP64S34iYSjyyC9osS5FnRyjdDMpLaPm","scmsg23fBMGvPyBaNKS35AkuZM4PpnHaup3bSrLYX8ZshhX4aZ1c4mE","scmsg3qr7fZEEBrTZxn5YZKMDgQ1hBDf5GwKecJegJXJyLGp6nxcLBA","scmsg2NnWpkUjWnZPgfE2pnCFyY7SA471dnLU8PvGD89YjuJfntfA53"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "31302d31", + "31302d32", + "31302d33", + "31302d34", + "31302d35" +] +200 OK +["scmsg23W2XaqYXEuxFFmQRZwgWwXiJ972nU4Tk1Um3KGGdr3AkqGBhV","scmsg24JUWpQyRvpYVRc2tgtMUJbzMrJWNLtidsZsLEfjBGiMFNuMTK","scmsg3bfLJRgEbj7FTBCnJ6ZsuxMf8KNVLLCZ8EkpTQbiLDAVmWB3rS","scmsg3NGcoW9bSrz6A5BvXcnxk2255yAWCGsJgrGadjzE4JpKpFveZe","scmsg3eAbwnP55Vg8rGZRBQtWHeLDWmWEJhXvm62cNa2r6mj7Y8iMxH"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "31312d31", + "31312d32", + "31312d33", + "31312d34", + "31312d35" +] +200 OK +["scmsg2vRX381tupMRsoWfXMYG4rVEkNiVZZxAhjSDmpdHbUjS9RUwQs","scmsg3Mph77VXyCm4m7FEDFoqyPzuevdN7W3UNqnXJttYRTtvY3vVVW","scmsg3dZBvX4ArK26LoCqR7mKhMa5DnWm9T5CEYZPfrUo8xhz6bbofd","scmsg237KVpGjKJxKPnGvYSLqkqtEURVG6ive7dNqPo2x6X4SDhsctC","scmsg2rFWrXD9jQ8UjL6XRcKRq6HJQ9fgdKgXxf4Auj5F5bxzYwqkXG"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "31322d31", + "31322d32", + "31322d33", + "31322d34", + "31322d35" +] +200 OK +["scmsg2k1HPFjtZFMUJUHKFbMkaijLa5ns8UMRb5h2d8iynf1gzeFCHZ","scmsg3UdpWDaa29XQ3hYHZwe2kiLfZ24KrkcVh58TR2yzYJsAoBGVwa","scmsg2qG2uiM8p8i49D1xp8NiznNAe6e3CFPwQgb4nzty9Wmp4qspzq","scmsg3B2psrEy3hxFURDzLtYqPckSycE8NWbuB2pZwKk6LaRhPUR3jh","scmsg3ccqjHaRH7gE98cZGvFBNHj1QgnPNZiByvB5rGpQMMC7MthkR8"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "31332d31", + "31332d32", + "31332d33", + "31332d34", + "31332d35" +] +200 OK 
+["scmsg3ifsPVXRWwo4BiBUcFJcNNpdCGhnJrcFQqH8accCscdE6Jdgys","scmsg3TsK1LMasQkWUcXhoVZBCaEJDFk8jVUxQkjEzRUi7AJU8KGXRE","scmsg3R5hqiXmVL8aQTzzTKwfiFXsoDj9dE96A3B3cd2VsUBZGdjszC","scmsg27jkAx3C8K2w8m6bPUHMsBNKqZ6y1A2XN5Ujw4nCmbVWzK6iHr","scmsg38gfhAaeaNovx4XAKCQ97D9zEGmebYXvkKioDWd5DMLdAbVCDF"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "31342d31", + "31342d32", + "31342d33", + "31342d34", + "31342d35" +] +200 OK +["scmsg3SQfcD9VuNoUzUpQqGtETZXiMmDVLHUkHVxKiA87kYaYVsjDNJ","scmsg25qzqinLMRAeezYjTR4gh3srY4H6mMRpGARJeqTm31mKSKjnE7","scmsg322sHe82ordku5DRR4xQ6Di3SdxjxV8kg7CBAdE7sxAmcD8yA5","scmsg2xCTmrcSLzzxUP5aXpdGS4zLttFQiuTLzEafuRrXGmsAyDXqVg","scmsg3CjDyDbTJcNtexxdqhqUPLoWgW9bjyvgsFnpQDNbDK2MK7Ju9o"] + +POST http://[HOST]:[PORT]/local/batcher/injection +Content-type: application/json +[ + "31352d31", + "31352d32", + "31352d33", + "31352d34", + "31352d35" +] +200 OK +["scmsg1zCJDwhb18RRCCXJgXxXXeapZWzCz1zn89xqnKf1esT7kvMWUb","scmsg3idErXZUCGh2rHHUvuWqGgCXH523QLfPEdXh7JzVUGZ2acJbCC","scmsg2sfmQTX5TmmLKkAeRg4sWHXLnmWjnouYUXUNCLVsSNWPnNtLR9","scmsg2xipcKaxHRPtpk4qCZ2tB6JG8h4UPtr9GP7fyyZFxXB69NSNDi","scmsg21cRa6ffrFxAQVXQvA5MsVfNSN9NP2ku7NdYvNywfHA4U4Jp65"] + +GET http://[HOST]:[PORT]/global/block/head/hash +200 OK +"[BLOCK_HASH]" + +GET http://[HOST]:[PORT]/global/block/5/hash +200 OK +"[BLOCK_HASH]" + +GET http://[HOST]:[PORT]/global/block/finalized/level +200 OK +16 + +GET http://[HOST]:[PORT]/global/block/head/num_messages +200 OK +"8" + +GET http://[HOST]:[PORT]/global/block/head/status +200 OK +"riscv_dummy_status" + +GET http://[HOST]:[PORT]/global/block/head/ticks +200 OK +"8" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/tezos_head +200 OK +"[BLOCK_HASH]" + +GET http://[HOST]:[PORT]/global/tezos_level +200 OK +18 + diff --git a/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - node advances PVM state with messages (external).out 
b/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - node advances PVM state with messages (external).out new file mode 100644 index 000000000000..d595d20afd09 --- /dev/null +++ b/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - node advances PVM state with messages (external).out @@ -0,0 +1,445 @@ + +./octez-client --wait none originate smart rollup rollup from bootstrap1 of kind riscv of type bytes with kernel --burn-cap 9999999 +Node is bootstrapped. +Estimated gas: 1930.030 units (will add 100 for safety) +Estimated storage: 6552 bytes added (will add 20 for safety) +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000441 + Expected counter: 1 + Gas limit: 2031 + Storage limit: 6572 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000441 + payload fees(the block proposer) ....... +ꜩ0.000441 + Smart rollup origination: + Kind: riscv + Parameter type: bytes + Kernel Blake2B hash: '0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8' + This smart rollup origination was successfully applied + Consumed gas: 1929.997 + Storage size: 6552 bytes + Address: [SMART_ROLLUP_HASH] + Genesis commitment hash: [SC_ROLLUP_COMMITMENT_HASH] + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ1.638 + storage fees ........................... +ꜩ1.638 + +Smart rollup [SMART_ROLLUP_HASH] memorized as "rollup" +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"0" + + +./octez-client --wait none send smart rollup message '["1 6 + value"]' from bootstrap2 +Node is bootstrapped. 
+Estimated gas: 170.597 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000271 + Expected counter: 1 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000271 + payload fees(the block proposer) ....... +ꜩ0.000271 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.530 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"4" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"4" + + +./octez-client --wait none send smart rollup message '["2 8 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.597 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000271 + Expected counter: 2 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000271 + payload fees(the block proposer) ....... 
+ꜩ0.000271 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.530 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"8" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"8" + + +./octez-client --wait none send smart rollup message '["3 10 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.635 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000272 + Expected counter: 3 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000272 + payload fees(the block proposer) ....... +ꜩ0.000272 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.569 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"12" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"12" + + +./octez-client --wait none send smart rollup message '["4 12 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.635 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. 
+Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000272 + Expected counter: 4 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000272 + payload fees(the block proposer) ....... +ꜩ0.000272 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.569 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"16" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"16" + + +./octez-client --wait none send smart rollup message '["5 14 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.635 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000272 + Expected counter: 5 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000272 + payload fees(the block proposer) ....... 
+ꜩ0.000272 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.569 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"20" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"20" + + +./octez-client --wait none send smart rollup message '["6 16 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.635 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000272 + Expected counter: 6 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000272 + payload fees(the block proposer) ....... +ꜩ0.000272 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.569 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"24" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"24" + + +./octez-client --wait none send smart rollup message '["7 18 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.635 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. 
+Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000272 + Expected counter: 7 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000272 + payload fees(the block proposer) ....... +ꜩ0.000272 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.569 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"28" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"28" + + +./octez-client --wait none send smart rollup message '["8 20 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.635 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000272 + Expected counter: 8 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000272 + payload fees(the block proposer) ....... 
+ꜩ0.000272 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.569 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"32" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"32" + + +./octez-client --wait none send smart rollup message '["9 22 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.635 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000272 + Expected counter: 9 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000272 + payload fees(the block proposer) ....... +ꜩ0.000272 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.569 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"36" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"36" + + +./octez-client --wait none send smart rollup message '["10 24 + value"]' from bootstrap2 +Node is bootstrapped. +Estimated gas: 170.673 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. 
+Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000273 + Expected counter: 10 + Gas limit: 271 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000273 + payload fees(the block proposer) ....... +ꜩ0.000273 + Smart rollup messages submission: + This smart rollup messages submission was successfully applied + Consumed gas: 170.607 + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"40" + diff --git a/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - node advances PVM state with messages (internal).out b/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - node advances PVM state with messages (internal).out new file mode 100644 index 000000000000..dcc2a0eee22e --- /dev/null +++ b/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - node advances PVM state with messages (internal).out @@ -0,0 +1,195 @@ + +./octez-client --wait none originate smart rollup rollup from bootstrap1 of kind riscv of type bytes with kernel --burn-cap 9999999 +Node is bootstrapped. +Estimated gas: 1930.030 units (will add 100 for safety) +Estimated storage: 6552 bytes added (will add 20 for safety) +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. 
+This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000441 + Expected counter: 1 + Gas limit: 2031 + Storage limit: 6572 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000441 + payload fees(the block proposer) ....... +ꜩ0.000441 + Smart rollup origination: + Kind: riscv + Parameter type: bytes + Kernel Blake2B hash: '0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8' + This smart rollup origination was successfully applied + Consumed gas: 1929.997 + Storage size: 6552 bytes + Address: [SMART_ROLLUP_HASH] + Genesis commitment hash: [SC_ROLLUP_COMMITMENT_HASH] + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ1.638 + storage fees ........................... +ꜩ1.638 + +Smart rollup [SMART_ROLLUP_HASH] memorized as "rollup" +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"3" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"7" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"7" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"11" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"11" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"15" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"15" + +GET 
http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"19" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"19" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"23" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"23" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"27" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"27" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"31" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"31" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"35" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"35" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"39" + +GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"39" + 
+GET http://[HOST]:[PORT]/global/block/head/state_hash +200 OK +"[SC_ROLLUP_PVM_STATE_HASH]" + +GET http://[HOST]:[PORT]/global/block/head/total_ticks +200 OK +"43" + diff --git a/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - origination of a SCORU executes without error.out b/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - origination of a SCORU executes without error.out new file mode 100644 index 000000000000..0bc0a64f0ad1 --- /dev/null +++ b/tezt/tests/expected/sc_rollup.ml/Alpha- riscv - origination of a SCORU executes without error.out @@ -0,0 +1,35 @@ + +./octez-client --wait none originate smart rollup rollup from bootstrap1 of kind riscv of type string with kernel --burn-cap 9999999 +Node is bootstrapped. +Estimated gas: 1930.030 units (will add 100 for safety) +Estimated storage: 6552 bytes added (will add 20 for safety) +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +NOT waiting for the operation to be included. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer to make sure that it has been included. +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000441 + Expected counter: 1 + Gas limit: 2031 + Storage limit: 6572 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000441 + payload fees(the block proposer) ....... +ꜩ0.000441 + Smart rollup origination: + Kind: riscv + Parameter type: string + Kernel Blake2B hash: '0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8' + This smart rollup origination was successfully applied + Consumed gas: 1929.997 + Storage size: 6552 bytes + Address: [SMART_ROLLUP_HASH] + Genesis commitment hash: [SC_ROLLUP_COMMITMENT_HASH] + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ1.638 + storage fees ........................... 
+ꜩ1.638 + +Smart rollup [SMART_ROLLUP_HASH] memorized as "rollup" diff --git a/tezt/tests/sc_rollup.ml b/tezt/tests/sc_rollup.ml index 50e2a8ac9e08..7abb2a72edd1 100644 --- a/tezt/tests/sc_rollup.ml +++ b/tezt/tests/sc_rollup.ml @@ -164,6 +164,7 @@ let test_l1_scenario ?supports ?regression ?hooks ~kind ?boot_sector ?timeout ?whitelist_enable ?rpc_external + ~riscv_pvm_enable:(kind = "riscv") protocol in let* sc_rollup = @@ -649,7 +650,7 @@ let bake_until_event ?hook ?(at_least = 0) ?(timeout = 15.) client ?event_name | Lwt_unix.Timeout -> Test.fail "Timeout of %f seconds reached when waiting for event %a to \ - happens." + happen." timeout (Format.pp_print_option Format.pp_print_string) event_name @@ -1467,6 +1468,7 @@ let test_rollup_node_advances_pvm_state ?regression ~title ?boot_sector no_parse_bad_fingerprint.wasm - Stuck state due to parse error *) unit + | "riscv" -> unit | _otherwise -> raise (Invalid_argument kind) in @@ -4451,6 +4453,7 @@ let test_rpcs ~kind Check.((kernel_subkeys = ["boot.wasm"; "env"]) (list string)) ~error_msg:"The key's subkeys are %L but should be %R" ; return () + | "riscv" -> return () | _ -> failwith "incorrect kind" in let* _status = @@ -5904,18 +5907,17 @@ let start_rollup_node_with_encrypted_key ~kind = unit let register_riscv ~protocols = - (* TODO https://app.asana.com/0/0/1206991649221091/f - * change this to `Protocol.(From_protocol 019)` once RISC-V storage layer - * is implemented *) - let supports = Protocol.(Between_protocols (19, 19)) in - test_rollup_node_boots_into_initial_state protocols ~supports ~kind:"riscv" ; + let kind = "riscv" in + test_origination ~kind protocols ; + test_rpcs ~kind protocols ; + test_rollup_node_boots_into_initial_state protocols ~kind ; + test_rollup_node_advances_pvm_state protocols ~kind ~internal:false ; + test_rollup_node_advances_pvm_state protocols ~kind ~internal:true ; test_commitment_scenario - ~supports - ~extra_tags:["modes"; "operator"] - ~variant:"operator_publishes" - 
(mode_publish Operator true) + ~variant:"commitment_is_stored" + commitment_stored protocols - ~kind:"riscv" + ~kind let register ~kind ~protocols = test_origination ~kind protocols ; @@ -6053,7 +6055,7 @@ let register ~protocols = ~internal:false ; (* Specific riscv PVM tezt *) - register_riscv ~protocols ; + register_riscv ~protocols:[Protocol.Alpha] ; (* Shared tezts - will be executed for each PVMs. *) register ~kind:"wasm_2_0_0" ~protocols ; register ~kind:"arith" ~protocols ; -- GitLab From 594964a665787eb8ceec9bc909880350e6b060bf Mon Sep 17 00:00:00 2001 From: Eugen Zalinescu Date: Wed, 5 Jun 2024 16:19:28 +0200 Subject: [PATCH 05/11] DAL/Doc: mention install_dal_trusted_setup.sh Co-Authored-By: Nic Volanschi --- docs/introduction/howtoget.rst | 18 ++++++++++++++++++ docs/shell/dal_node.rst | 7 +++++++ 2 files changed, 25 insertions(+) diff --git a/docs/introduction/howtoget.rst b/docs/introduction/howtoget.rst index c076c421632d..508b5882b9a5 100644 --- a/docs/introduction/howtoget.rst +++ b/docs/introduction/howtoget.rst @@ -31,6 +31,11 @@ There are several options for getting the binaries, depending on how you plan to These different options are described in the following sections. +Some Octez binaries also require some parameter files to run. Only some of the packaged distributions include such parameter files. Therefore, depending on the type of installation and your user profile, you may have to install some extra parameter files separately. Their installation is currently described in section :ref:`compiling_with_make`, but those instructions may be used for other installation types: + +- :ref:`setup_zcash_params` +- :ref:`setup_dal_crypto_params` + Note that some of the packaged distributions are not only available for the latest stable release. For instance, static binaries are also available for release candidates, and Docker images are also available for the current development version (see :doc:`../releases/releases` for more information). 
When choosing between the installation options, you may take into account the @@ -529,6 +534,19 @@ and ``sapling-output.params``. Here is where you should expect to find those fil Note that the script ``fetch-params.sh`` downloads a third file containing parameters for Sprout (currently called ``sprout-groth16.params``), which is not loaded by Sapling and can be deleted to save a significant amount of space (this file is *much* bigger than the two other files). +.. _setup_dal_crypto_params: + +Install DAL trusted setup +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Users running :doc:`DAL<../shell/dal>` as :ref:`slot producers` +need to have a set of cryptographic parameters (known as an SRS) installed in +order to run their :doc:`DAL node<../shell/dal_node>`. The parameters can be +retrieved via the following script:: + + scripts/install_dal_trusted_setup.sh + + Get the sources ~~~~~~~~~~~~~~~ diff --git a/docs/shell/dal_node.rst b/docs/shell/dal_node.rst index 36276b7ebe5a..ab945ad95d85 100644 --- a/docs/shell/dal_node.rst +++ b/docs/shell/dal_node.rst @@ -20,6 +20,9 @@ Non-bootstrap DAL nodes distinguish themselves only in the topics they subscribe - Slot producers subscribe to all topics containing some specified slot indexes. - Bakers subscribe to all topics containing the attester identities they run for (for all possible slot indexes). +.. _dal_profiles: + + Profiles ~~~~~~~~ @@ -74,6 +77,10 @@ Both commands have the same arguments, which can be seen by executing, e.g., ``o See the :ref:`DAL node manual ` for more details. +In order to run a DAL node with a slot producer profile, one first needs to +install some cryptographic parameters, see the section on :ref:`Install DAL +trusted setup`. 
+ DAL configuration of the L1 node ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- GitLab From be4d3668597bdf724ca3a4c2a416469662385fd9 Mon Sep 17 00:00:00 2001 From: mbourgoin Date: Wed, 5 Jun 2024 14:46:54 +0200 Subject: [PATCH 06/11] lib_event_logging: Add alternative color option for logs This is currently unused --- src/lib_base/unix/file_descriptor_sink.ml | 22 ++++++- src/lib_event_logging/internal_event.ml | 62 ++++++++++++++----- src/lib_event_logging/internal_event.mli | 15 +++++ src/lib_rpc_http/RPC_client_unix.ml | 2 + .../lib_delegate/client_baking_blocks.ml | 2 + .../lib_delegate/client_baking_blocks.ml | 2 + .../lib_delegate/client_baking_blocks.ml | 2 + 7 files changed, 89 insertions(+), 18 deletions(-) diff --git a/src/lib_base/unix/file_descriptor_sink.ml b/src/lib_base/unix/file_descriptor_sink.ml index 117e25618665..e409280f8ae8 100644 --- a/src/lib_base/unix/file_descriptor_sink.ml +++ b/src/lib_base/unix/file_descriptor_sink.ml @@ -81,9 +81,21 @@ module Color = struct let bold_len = 4 module FG = struct + (* Error level default color *) let red = "\027[31m" + (* Warning level default color *) let yellow = "\027[33m" + + (* Alternative colors *) + + let green = "\027[32m" + + let blue = "\027[34m" + + let cyan = "\027[36m" + + let magenta = "\027[35m" end end @@ -619,6 +631,12 @@ end) : Internal_event.SINK with type t = t = struct | Some (_, None) -> (* exclude list *) false | Some (_, Some lvl) -> Internal_event.Level.compare M.level lvl >= 0) + let color = function + | Internal_event.Blue -> Some Color.FG.blue + | Internal_event.Cyan -> Some Color.FG.cyan + | Internal_event.Green -> Some Color.FG.green + | Internal_event.Magenta -> Some Color.FG.magenta + let level_color = function | Internal_event.Warning -> Some Color.FG.yellow | Error | Fatal -> Some Color.FG.red @@ -654,7 +672,9 @@ end) : Internal_event.SINK with type t = t = struct if colors then let*! 
color_compatible = output_color_compatible output in if color_compatible then - Lwt.return (Enabled (level_color M.level)) + match M.alternative_color with + | None -> Lwt.return (Enabled (level_color M.level)) + | Some c -> Lwt.return (Enabled (color c)) else Lwt.return Disabled else Lwt.return Disabled in diff --git a/src/lib_event_logging/internal_event.ml b/src/lib_event_logging/internal_event.ml index 6a09c564a753..7e7e58a175d1 100644 --- a/src/lib_event_logging/internal_event.ml +++ b/src/lib_event_logging/internal_event.ml @@ -189,6 +189,8 @@ let register_section section = registered_sections := String.Set.add (Section.name section) !registered_sections +type alternative_color = Magenta | Cyan | Green | Blue + module type EVENT_DEFINITION = sig type t @@ -203,6 +205,8 @@ module type EVENT_DEFINITION = sig val encoding : t Data_encoding.t val level : level + + val alternative_color : alternative_color option end module type EVENT = sig @@ -773,7 +777,7 @@ module Simple = struct ~name (Data_encoding.With_version.first_version encoding) - let declare_0 ?section ~name ~msg ?(level = Info) () = + let declare_0 ?alternative_color ?section ~name ~msg ?(level = Info) () = let section = make_section section in let parsed_msg = parse_msg [] msg in let module Definition : EVENT_DEFINITION with type t = unit = struct @@ -791,12 +795,14 @@ module Simple = struct let encoding = with_version ~name Data_encoding.unit let level = level + + let alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun () -> Event.emit ?section ())} - let declare_1 (type a) ?section ~name ~msg ?(level = Info) ?pp1 - (f1_name, (f1_enc : a Data_encoding.t)) = + let declare_1 (type a) ?alternative_color ?section ~name ~msg ?(level = Info) + ?pp1 (f1_name, (f1_enc : a Data_encoding.t)) = let section = make_section section in let parsed_msg = parse_msg [f1_name] msg in let module Definition : EVENT_DEFINITION with type t = a = struct @@ -819,12 +825,14 
@@ module Simple = struct let encoding = with_version ~name f1_enc let level = level + + let alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun parameter -> Event.emit ?section parameter)} - let declare_2 (type a b) ?section ~name ~msg ?(level = Info) ?pp1 - (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 + let declare_2 (type a b) ?alternative_color ?section ~name ~msg + ?(level = Info) ?pp1 (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 (f2_name, (f2_enc : b Data_encoding.t)) = let section = make_section section in let parsed_msg = parse_msg [f1_name; f2_name] msg in @@ -855,12 +863,14 @@ module Simple = struct (Data_encoding.req f2_name f2_enc) let level = level + + let alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun parameters -> Event.emit ?section parameters)} - let declare_3 (type a b c) ?section ~name ~msg ?(level = Info) ?pp1 - (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 + let declare_3 (type a b c) ?alternative_color ?section ~name ~msg + ?(level = Info) ?pp1 (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 (f2_name, (f2_enc : b Data_encoding.t)) ?pp3 (f3_name, (f3_enc : c Data_encoding.t)) = let section = make_section section in @@ -894,12 +904,14 @@ module Simple = struct (Data_encoding.req f3_name f3_enc) let level = level + + let alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun parameters -> Event.emit ?section parameters)} - let declare_4 (type a b c d) ?section ~name ~msg ?(level = Info) ?pp1 - (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 + let declare_4 (type a b c d) ?alternative_color ?section ~name ~msg + ?(level = Info) ?pp1 (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 (f2_name, (f2_enc : b Data_encoding.t)) ?pp3 (f3_name, (f3_enc : c Data_encoding.t)) ?pp4 (f4_name, (f4_enc : d Data_encoding.t)) = @@ -937,12 +949,14 @@ module Simple = struct (Data_encoding.req f4_name f4_enc) let level = 
level + + let alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun parameters -> Event.emit ?section parameters)} - let declare_5 (type a b c d e) ?section ~name ~msg ?(level = Info) ?pp1 - (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 + let declare_5 (type a b c d e) ?alternative_color ?section ~name ~msg + ?(level = Info) ?pp1 (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 (f2_name, (f2_enc : b Data_encoding.t)) ?pp3 (f3_name, (f3_enc : c Data_encoding.t)) ?pp4 (f4_name, (f4_enc : d Data_encoding.t)) ?pp5 @@ -985,12 +999,14 @@ module Simple = struct (Data_encoding.req f5_name f5_enc) let level = level + + let alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun parameters -> Event.emit ?section parameters)} - let declare_6 (type a b c d e f) ?section ~name ~msg ?(level = Info) ?pp1 - (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 + let declare_6 (type a b c d e f) ?alternative_color ?section ~name ~msg + ?(level = Info) ?pp1 (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 (f2_name, (f2_enc : b Data_encoding.t)) ?pp3 (f3_name, (f3_enc : c Data_encoding.t)) ?pp4 (f4_name, (f4_enc : d Data_encoding.t)) ?pp5 @@ -1036,12 +1052,14 @@ module Simple = struct (Data_encoding.req f6_name f6_enc) let level = level + + let alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun parameters -> Event.emit ?section parameters)} - let declare_7 (type a b c d e f g) ?section ~name ~msg ?(level = Info) ?pp1 - (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 + let declare_7 (type a b c d e f g) ?alternative_color ?section ~name ~msg + ?(level = Info) ?pp1 (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 (f2_name, (f2_enc : b Data_encoding.t)) ?pp3 (f3_name, (f3_enc : c Data_encoding.t)) ?pp4 (f4_name, (f4_enc : d Data_encoding.t)) ?pp5 @@ -1092,12 +1110,14 @@ module Simple = struct (Data_encoding.req f7_name f7_enc) let level = level + + let 
alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun parameters -> Event.emit ?section parameters)} - let declare_8 (type a b c d e f g h) ?section ~name ~msg ?(level = Info) ?pp1 - (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 + let declare_8 (type a b c d e f g h) ?alternative_color ?section ~name ~msg + ?(level = Info) ?pp1 (f1_name, (f1_enc : a Data_encoding.t)) ?pp2 (f2_name, (f2_enc : b Data_encoding.t)) ?pp3 (f3_name, (f3_enc : c Data_encoding.t)) ?pp4 (f4_name, (f4_enc : d Data_encoding.t)) ?pp5 @@ -1151,6 +1171,8 @@ module Simple = struct (Data_encoding.req f8_name f8_enc) let level = level + + let alternative_color = alternative_color end in let module Event = Make (Definition) in {name; emit = (fun parameters -> Event.emit ?section parameters)} @@ -1171,6 +1193,8 @@ module Lwt_worker_logger = struct let doc = "Worker started event" let level = Debug + + let alternative_color = None end) module Ended_event = Make (struct @@ -1187,6 +1211,8 @@ module Lwt_worker_logger = struct let doc = "Worker ended event" let level = Debug + + let alternative_color = None end) module Failed_event = Make (struct @@ -1204,6 +1230,8 @@ module Lwt_worker_logger = struct let doc = "Worker failed event" let level = Error + + let alternative_color = None end) let on_event name event = diff --git a/src/lib_event_logging/internal_event.mli b/src/lib_event_logging/internal_event.mli index ddf4dc63f8d2..0b61a311c358 100644 --- a/src/lib_event_logging/internal_event.mli +++ b/src/lib_event_logging/internal_event.mli @@ -97,6 +97,10 @@ val get_registered_sections : unit -> string Seq.t val register_section : Section.t -> unit +(** Alternative colors usable in console logs. Yellow and Red are already used + for Warning/alerts log levels*) +type alternative_color = Magenta | Cyan | Green | Blue + (** Parameters defining an inspectable type of events. 
*) module type EVENT_DEFINITION = sig type t @@ -127,6 +131,8 @@ module type EVENT_DEFINITION = sig (** Return the preferred {!type-level} for a given event instance. *) val level : level + + val alternative_color : alternative_color option end (** Events created with {!Make} provide the {!EVENT} API. *) @@ -259,6 +265,7 @@ module Simple : sig (** Declare an event with no parameters. *) val declare_0 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> @@ -268,6 +275,7 @@ module Simple : sig (** Declare an event with one parameter. *) val declare_1 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> @@ -278,6 +286,7 @@ module Simple : sig (** Declare an event with two parameters. *) val declare_2 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> @@ -290,6 +299,7 @@ module Simple : sig (** Declare an event with three parameters. *) val declare_3 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> @@ -304,6 +314,7 @@ module Simple : sig (** Declare an event with four parameters. *) val declare_4 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> @@ -320,6 +331,7 @@ module Simple : sig (** Declare an event with five parameters. *) val declare_5 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> @@ -338,6 +350,7 @@ module Simple : sig (** Declare an event with six parameters. *) val declare_6 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> @@ -358,6 +371,7 @@ module Simple : sig (** Declare an event with seven parameters. *) val declare_7 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> @@ -380,6 +394,7 @@ module Simple : sig (** Declare an event with eight parameters. 
*) val declare_8 : + ?alternative_color:alternative_color -> ?section:string list -> name:string -> msg:string -> diff --git a/src/lib_rpc_http/RPC_client_unix.ml b/src/lib_rpc_http/RPC_client_unix.ml index 17201d84208c..3ca60c91e657 100644 --- a/src/lib_rpc_http/RPC_client_unix.ml +++ b/src/lib_rpc_http/RPC_client_unix.ml @@ -52,6 +52,8 @@ module Attempt_logging = Internal_event.Make (struct text let level = Internal_event.Error + + let alternative_color = None end) module RetryClient : diff --git a/src/proto_018_Proxford/lib_delegate/client_baking_blocks.ml b/src/proto_018_Proxford/lib_delegate/client_baking_blocks.ml index ece92bc3bb67..360bf2e0bec5 100644 --- a/src/proto_018_Proxford/lib_delegate/client_baking_blocks.ml +++ b/src/proto_018_Proxford/lib_delegate/client_baking_blocks.ml @@ -143,6 +143,8 @@ module Block_seen_event = struct let doc = "Block observed while monitoring a blockchain." let level = Internal_event.Info + + let alternative_color = None end module Event = Internal_event.Make (Definition) diff --git a/src/proto_019_PtParisB/lib_delegate/client_baking_blocks.ml b/src/proto_019_PtParisB/lib_delegate/client_baking_blocks.ml index ece92bc3bb67..360bf2e0bec5 100644 --- a/src/proto_019_PtParisB/lib_delegate/client_baking_blocks.ml +++ b/src/proto_019_PtParisB/lib_delegate/client_baking_blocks.ml @@ -143,6 +143,8 @@ module Block_seen_event = struct let doc = "Block observed while monitoring a blockchain." let level = Internal_event.Info + + let alternative_color = None end module Event = Internal_event.Make (Definition) diff --git a/src/proto_alpha/lib_delegate/client_baking_blocks.ml b/src/proto_alpha/lib_delegate/client_baking_blocks.ml index ece92bc3bb67..360bf2e0bec5 100644 --- a/src/proto_alpha/lib_delegate/client_baking_blocks.ml +++ b/src/proto_alpha/lib_delegate/client_baking_blocks.ml @@ -143,6 +143,8 @@ module Block_seen_event = struct let doc = "Block observed while monitoring a blockchain." 
let level = Internal_event.Info + + let alternative_color = None end module Event = Internal_event.Make (Definition) -- GitLab From 74fd4468c8509abd0c83e2fa52339471b37663ae Mon Sep 17 00:00:00 2001 From: Anne-Laure Date: Wed, 15 May 2024 13:57:57 +0200 Subject: [PATCH 07/11] Tezt/Test/Cloud/Dal: disconnect & reconnect all bakers successively --- tezt/tests/cloud/dal.ml | 99 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 96 insertions(+), 3 deletions(-) diff --git a/tezt/tests/cloud/dal.ml b/tezt/tests/cloud/dal.ml index 3eb29bb3202f..ff45521e2409 100644 --- a/tezt/tests/cloud/dal.ml +++ b/tezt/tests/cloud/dal.ml @@ -8,6 +8,53 @@ module Cryptobox = Dal_common.Cryptobox module Helpers = Dal_common.Helpers +module Disconnected = struct + (* The number of block during which the baker stays disconnected *) + let time_disconnected = 10 + + (* Each [disconnect_frequency] blocks, a baker is disconnected *) + let disconnect_frequency = 8 + + module Map = Map.Make (struct + type t = int + + let compare = Int.compare + end) + + (* The state contains bakers indexes that have been disconnected associated + with the level at where they have been disconnected *) + let state = ref Map.empty + + (* The next baker to disconnected is stored there *) + let next_to_disconnect = ref 0 + + (* This function only does something when a relevant level is reached. 
In + that case, the next baker to disconnect is added with the current level in + the state and [f] is applied that baker *) + let disconnect level f = + if level mod disconnect_frequency <> 0 then Lwt.return_unit + else + match Map.find_opt !next_to_disconnect !state with + | Some _ -> Lwt.return_unit + | None -> + state := Map.add !next_to_disconnect level !state ; + let res = f !next_to_disconnect in + next_to_disconnect := !next_to_disconnect + 1 ; + res + + (* Applies [f] on the bakers that have been disconnected for long enough *) + let reconnect level f = + let bakers_to_reconnect, bakers_to_keep_disconnected = + Map.partition + (fun _ disco_level -> level >= disco_level + time_disconnected) + !state + in + state := bakers_to_keep_disconnected ; + Map.to_seq bakers_to_reconnect + |> List.of_seq + |> Lwt_list.iter_p (fun (b, _) -> f b) +end + module Cli = struct let section = Clap.section @@ -888,7 +935,39 @@ let init ~(configuration : configuration) cloud next_agent = metrics; } -let on_new_level t level = +let wait_for_gossipsub_worker_event ~name dal_node lambda = + Dal_node.wait_for dal_node (sf "gossipsub_worker_event-%s.v0" name) lambda + +let check_expected expected found = if expected <> found then None else Some () + +let ( let*?? ) a b = Option.bind a b + +let check_new_connection_event ~main_node ~other_node ~is_trusted = + let id = Dal_node.read_identity other_node in + wait_for_gossipsub_worker_event ~name:"new_connection" main_node (fun event -> + let*?? () = check_expected id JSON.(event |-> "peer" |> as_string) in + check_expected is_trusted JSON.(event |-> "trusted" |> as_bool)) + +(** Connect [dal_node1] and [dal_node2] using the bootstrap peer mechanism. + [dal_node2] will use [dal_node1] as a bootstrap peer. + + For this to work, [dal_node1] must already be running. *) +let connect_nodes_via_p2p dal_node1 dal_node2 = + (* We ensure that [dal_node1] connects to [dal_node2]. 
*) + let conn_ev_in_node1 = + check_new_connection_event + ~main_node:dal_node1 + ~other_node:dal_node2 + ~is_trusted:false + in + let* () = Dal_node.run dal_node2 in + Log.info + "Node %s started. Waiting for connection with node %s" + (Dal_node.name dal_node2) + (Dal_node.name dal_node1) ; + conn_ev_in_node1 + +let on_new_level ?(disconnect = false) t level = let node = t.bootstrap.node in let client = t.bootstrap.client in let* () = @@ -904,7 +983,21 @@ let on_new_level t level = pp_metrics t metrics ; push_metrics t metrics ; Hashtbl.replace t.metrics level metrics ; - Lwt.return_unit + if not disconnect then Lwt.return_unit + else + let nb_bakers = List.length t.bakers in + let* () = + Disconnected.disconnect level (fun b -> + let baker_to_disconnect = + (List.nth t.bakers (b mod nb_bakers)).dal_node + in + Dal_node.terminate baker_to_disconnect) + in + Disconnected.reconnect level (fun b -> + let baker_to_reconnect = + (List.nth t.bakers (b mod nb_bakers)).dal_node + in + connect_nodes_via_p2p t.bootstrap.dal_node baker_to_reconnect) let produce_slot t level i = let producer = List.nth t.producers i in @@ -940,7 +1033,7 @@ let producers_not_ready t = List.for_all producer_ready t.producers let rec loop t level = - let p = on_new_level t level in + let p = on_new_level ~disconnect:true t level in let _p2 = if producers_not_ready t then Lwt.return_unit else -- GitLab From 4989d5d9630b395b3b741d69001a0bd322f010ca Mon Sep 17 00:00:00 2001 From: Anne-Laure Date: Thu, 30 May 2024 19:15:38 +0200 Subject: [PATCH 08/11] Tezt/Test/Cloud/Dal: add a disconnection argument in Cli --- tezt/tests/cloud/dal.ml | 72 +++++++++++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 25 deletions(-) diff --git a/tezt/tests/cloud/dal.ml b/tezt/tests/cloud/dal.ml index ff45521e2409..fac3eb7fb7f1 100644 --- a/tezt/tests/cloud/dal.ml +++ b/tezt/tests/cloud/dal.ml @@ -9,12 +9,6 @@ module Cryptobox = Dal_common.Cryptobox module Helpers = Dal_common.Helpers module 
Disconnected = struct - (* The number of block during which the baker stays disconnected *) - let time_disconnected = 10 - - (* Each [disconnect_frequency] blocks, a baker is disconnected *) - let disconnect_frequency = 8 - module Map = Map.Make (struct type t = int @@ -31,8 +25,8 @@ module Disconnected = struct (* This function only does something when a relevant level is reached. In that case, the next baker to disconnect is added with the current level in the state and [f] is applied that baker *) - let disconnect level f = - if level mod disconnect_frequency <> 0 then Lwt.return_unit + let disconnect frequency level f = + if level mod frequency <> 0 then Lwt.return_unit else match Map.find_opt !next_to_disconnect !state with | Some _ -> Lwt.return_unit @@ -43,10 +37,10 @@ module Disconnected = struct res (* Applies [f] on the bakers that have been disconnected for long enough *) - let reconnect level f = + let reconnect reconnection_delay level f = let bakers_to_reconnect, bakers_to_keep_disconnected = Map.partition - (fun _ disco_level -> level >= disco_level + time_disconnected) + (fun _ disco_level -> level >= disco_level + reconnection_delay) !state in state := bakers_to_keep_disconnected ; @@ -158,6 +152,30 @@ module Cli = struct ~description:"Specify the economic protocol used for this test" protocol_typ Protocol.Alpha + + let disconnect = + let disconnect_typ = + let parse string = + try + match string |> String.split_on_char ',' with + | [disconnection; reconnection] -> + Some (int_of_string disconnection, int_of_string reconnection) + | _ -> None + with _ -> None + in + let show (d, r) = Format.sprintf "%d,%d" d r in + Clap.typ ~name:"disconnect" ~dummy:(10, 10) ~parse ~show + in + Clap.optional + ~section + ~long:"disconnect" + ~placeholder:"," + ~description: + "If this argument is provided, bakers will disconnect in turn each \ + levels, and each will reconnect after a delay \ + of levels." 
+ disconnect_typ + () end type configuration = { @@ -167,6 +185,7 @@ type configuration = { observer_slot_indices : int list; protocol : Protocol.t; producer_machine_type : string option; + disconnect : (int * int) option; } type bootstrap = {node : Node.t; dal_node : Dal_node.t; client : Client.t} @@ -967,7 +986,7 @@ let connect_nodes_via_p2p dal_node1 dal_node2 = (Dal_node.name dal_node1) ; conn_ev_in_node1 -let on_new_level ?(disconnect = false) t level = +let on_new_level t level = let node = t.bootstrap.node in let client = t.bootstrap.client in let* () = @@ -983,21 +1002,22 @@ let on_new_level ?(disconnect = false) t level = pp_metrics t metrics ; push_metrics t metrics ; Hashtbl.replace t.metrics level metrics ; - if not disconnect then Lwt.return_unit - else - let nb_bakers = List.length t.bakers in - let* () = - Disconnected.disconnect level (fun b -> - let baker_to_disconnect = + match t.configuration.disconnect with + | None -> Lwt.return_unit + | Some (disconnect_frequency, time_disconnected) -> + let nb_bakers = List.length t.bakers in + let* () = + Disconnected.disconnect disconnect_frequency level (fun b -> + let baker_to_disconnect = + (List.nth t.bakers (b mod nb_bakers)).dal_node + in + Dal_node.terminate baker_to_disconnect) + in + Disconnected.reconnect time_disconnected level (fun b -> + let baker_to_reconnect = (List.nth t.bakers (b mod nb_bakers)).dal_node in - Dal_node.terminate baker_to_disconnect) - in - Disconnected.reconnect level (fun b -> - let baker_to_reconnect = - (List.nth t.bakers (b mod nb_bakers)).dal_node - in - connect_nodes_via_p2p t.bootstrap.dal_node baker_to_reconnect) + connect_nodes_via_p2p t.bootstrap.dal_node baker_to_reconnect) let produce_slot t level i = let producer = List.nth t.producers i in @@ -1033,7 +1053,7 @@ let producers_not_ready t = List.for_all producer_ready t.producers let rec loop t level = - let p = on_new_level ~disconnect:true t level in + let p = on_new_level t level in let _p2 = if 
producers_not_ready t then Lwt.return_unit else @@ -1052,6 +1072,7 @@ let configuration = let observer_slot_indices = Cli.observer_slot_indices in let protocol = Cli.protocol in let producer_machine_type = Cli.producer_machine_type in + let disconnect = Cli.disconnect in { stake; stake_machine_type; @@ -1059,6 +1080,7 @@ let configuration = observer_slot_indices; protocol; producer_machine_type; + disconnect; } let benchmark () = -- GitLab From 4de76ff43394472b5b87f13cce9842014dc21d2f Mon Sep 17 00:00:00 2001 From: Anne-Laure Date: Fri, 31 May 2024 15:49:33 +0200 Subject: [PATCH 09/11] Tezt/Test/Cloud/Dal: disconnection is handled in type t --- tezt/tests/cloud/dal.ml | 104 ++++++++++++++++++++++++++-------------- 1 file changed, 69 insertions(+), 35 deletions(-) diff --git a/tezt/tests/cloud/dal.ml b/tezt/tests/cloud/dal.ml index fac3eb7fb7f1..3ff42ecf207f 100644 --- a/tezt/tests/cloud/dal.ml +++ b/tezt/tests/cloud/dal.ml @@ -8,45 +8,71 @@ module Cryptobox = Dal_common.Cryptobox module Helpers = Dal_common.Helpers -module Disconnected = struct +module Disconnect = struct module Map = Map.Make (struct type t = int let compare = Int.compare end) - (* The state contains bakers indexes that have been disconnected associated - with the level at where they have been disconnected *) - let state = ref Map.empty + (* The [state] contains bakers indexes that have been disconnected associated + with the level at which they have been disconnected + Each [frequency] number of levels, a baker, chosen in a round-robin + fashion, is disconnected. + A disconnected baker reconnects after [reconnection_delay] levels. 
+ The next baker to disconnect is stored in [next_to_disconnect]; + it is 0 when no baker has been disconnected yet *) + type t = { + disconnected_bakers : int Map.t; + frequency : int; + reconnection_delay : int; + next_to_disconnect : int; + } - (* The next baker to disconnected is stored there *) - let next_to_disconnect = ref 0 + let init (frequency, reconnection_delay) = + { + disconnected_bakers = Map.empty; + frequency; + reconnection_delay; + next_to_disconnect = 0; + } - (* This function only does something when a relevant level is reached. In - that case, the next baker to disconnect is added with the current level in - the state and [f] is applied that baker *) - let disconnect frequency level f = - if level mod frequency <> 0 then Lwt.return_unit + (* When a relevant level is reached, [disconnect t level f] put the baker of + index [t.next_to_disconnect] in [t.disconnected_bakers] and applies [f] to + this baker. If it is already disconnected, the function does nothing and + returns [t] unchanged *) + let disconnect t level f = + if level mod t.frequency <> 0 then Lwt.return t else - match Map.find_opt !next_to_disconnect !state with - | Some _ -> Lwt.return_unit + match Map.find_opt t.next_to_disconnect t.disconnected_bakers with + | Some _ -> + Log.info + "disconnect: all bakers have been disconnected, waiting for next \ + baker to reconnect." 
; + Lwt.return t | None -> - state := Map.add !next_to_disconnect level !state ; - let res = f !next_to_disconnect in - next_to_disconnect := !next_to_disconnect + 1 ; - res + let* () = f t.next_to_disconnect in + Lwt.return + { + t with + disconnected_bakers = + Map.add t.next_to_disconnect level t.disconnected_bakers; + next_to_disconnect = t.next_to_disconnect + 1; + } (* Applies [f] on the bakers that have been disconnected for long enough *) - let reconnect reconnection_delay level f = + let reconnect t level f = let bakers_to_reconnect, bakers_to_keep_disconnected = Map.partition - (fun _ disco_level -> level >= disco_level + reconnection_delay) - !state + (fun _ disco_level -> level >= disco_level + t.reconnection_delay) + t.disconnected_bakers + in + let* () = + Map.to_seq bakers_to_reconnect + |> List.of_seq + |> Lwt_list.iter_p (fun (b, _) -> f b) in - state := bakers_to_keep_disconnected ; - Map.to_seq bakers_to_reconnect - |> List.of_seq - |> Lwt_list.iter_p (fun (b, _) -> f b) + Lwt.return {t with disconnected_bakers = bakers_to_keep_disconnected} end module Cli = struct @@ -254,6 +280,7 @@ type t = { parameters : Dal_common.Parameters.t; infos : (int, per_level_info) Hashtbl.t; metrics : (int, metrics) Hashtbl.t; + disconnection_state : Disconnect.t option; } let pp_metrics t @@ -941,6 +968,9 @@ let init ~(configuration : configuration) cloud next_agent = let infos = Hashtbl.create 101 in let metrics = Hashtbl.create 101 in Hashtbl.replace metrics 1 default_metrics ; + let disconnection_state = + Option.map Disconnect.init configuration.disconnect + in Lwt.return { cloud; @@ -952,6 +982,7 @@ let init ~(configuration : configuration) cloud next_agent = parameters; infos; metrics; + disconnection_state; } let wait_for_gossipsub_worker_event ~name dal_node lambda = @@ -1002,22 +1033,25 @@ let on_new_level t level = pp_metrics t metrics ; push_metrics t metrics ; Hashtbl.replace t.metrics level metrics ; - match t.configuration.disconnect with - | None -> 
Lwt.return_unit - | Some (disconnect_frequency, time_disconnected) -> + match t.disconnection_state with + | None -> Lwt.return t + | Some disconnection_state -> let nb_bakers = List.length t.bakers in - let* () = - Disconnected.disconnect disconnect_frequency level (fun b -> + let* disconnection_state = + Disconnect.disconnect disconnection_state level (fun b -> let baker_to_disconnect = (List.nth t.bakers (b mod nb_bakers)).dal_node in Dal_node.terminate baker_to_disconnect) in - Disconnected.reconnect time_disconnected level (fun b -> - let baker_to_reconnect = - (List.nth t.bakers (b mod nb_bakers)).dal_node - in - connect_nodes_via_p2p t.bootstrap.dal_node baker_to_reconnect) + let* disconnection_state = + Disconnect.reconnect disconnection_state level (fun b -> + let baker_to_reconnect = + (List.nth t.bakers (b mod nb_bakers)).dal_node + in + connect_nodes_via_p2p t.bootstrap.dal_node baker_to_reconnect) + in + Lwt.return {t with disconnection_state = Some disconnection_state} let produce_slot t level i = let producer = List.nth t.producers i in @@ -1062,7 +1096,7 @@ let rec loop t level = |> Seq.map (fun i -> produce_slot t level i) |> List.of_seq |> Lwt.join in - let* () = p in + let* t = p in loop t (level + 1) let configuration = -- GitLab From e55ed54759164349ac2773a14ea9ac784c1ac13f Mon Sep 17 00:00:00 2001 From: Anne-Laure Date: Fri, 7 Jun 2024 10:04:21 +0200 Subject: [PATCH 10/11] Dal_common: factorize some code of tezt/dal & tezt/cloud/dal --- tezt/lib_tezos/dal_common.ml | 59 +++++++++++++++++++++++ tezt/lib_tezos/dal_common.mli | 22 +++++++++ tezt/tests/cloud/dal.ml | 36 ++------------ tezt/tests/dal.ml | 91 +++++++++-------------------------- 4 files changed, 107 insertions(+), 101 deletions(-) diff --git a/tezt/lib_tezos/dal_common.ml b/tezt/lib_tezos/dal_common.ml index 09160e3475b7..389e1275afb4 100644 --- a/tezt/lib_tezos/dal_common.ml +++ b/tezt/lib_tezos/dal_common.ml @@ -645,6 +645,65 @@ module Helpers = struct client in return 
commitment_string + + let wait_for_gossipsub_worker_event ~name dal_node lambda = + Dal_node.wait_for dal_node (sf "gossipsub_worker_event-%s.v0" name) lambda + + let check_new_connection_event ~main_node ?other_peer_id ~other_node + ~is_trusted () = + let ( let*?? ) a b = Option.bind a b in + let check_expected expected found = + if expected <> found then None else Some () + in + let* peer_id = + wait_for_gossipsub_worker_event + ~name:"new_connection" + main_node + (fun event -> + let*?? peer_id = JSON.(event |-> "peer" |> as_string_opt) in + let*?? () = + check_expected is_trusted JSON.(event |-> "trusted" |> as_bool) + in + match other_peer_id with + | None -> + (* No expected peer id, event is considered valid and its peer id is returned *) + Some peer_id + | Some other_peer_id -> + if other_peer_id = peer_id then Some peer_id + else + (* A connection was received from an unexpected peer, + discard the event. *) + None) + in + let* other_node_id = Dal_node.read_identity other_node in + let () = + Check.(peer_id = other_node_id) + ~__LOC__ + Check.string + ~error_msg:"Expected a connection from the peer of id %R, got %L." + in + unit + + let connect_nodes_via_p2p ?(init_config = false) dal_node1 dal_node2 = + let* () = + if init_config then + Dal_node.init_config ~peers:[Dal_node.listen_addr dal_node1] dal_node2 + else Lwt.return_unit + in + (* We ensure that [dal_node1] connects to [dal_node2]. *) + let conn_ev_in_node1 = + check_new_connection_event + ~main_node:dal_node1 + ~other_node:dal_node2 + ~is_trusted:false + () + in + let* () = Dal_node.run dal_node2 in + Log.info + "Node %s started. 
Waiting for connection with node %s" + (Dal_node.name dal_node2) + (Dal_node.name dal_node1) ; + conn_ev_in_node1 end module Check = struct diff --git a/tezt/lib_tezos/dal_common.mli b/tezt/lib_tezos/dal_common.mli index 4182c3715886..f22b5f07982e 100644 --- a/tezt/lib_tezos/dal_common.mli +++ b/tezt/lib_tezos/dal_common.mli @@ -120,6 +120,28 @@ module Helpers : sig | `Prover_SRS_not_loaded | `Invalid_shard ] -> unit + + (** Wait for a connection event between [main_node] and + [other_node]. The optional argument [other_peer_id] can be used to + ignore the connection events which are not between these two + nodes. When this optional argument is given, it must be the peer + of [other_node]; this assumption is checked by this function + after the reception of the connection event. *) + val check_new_connection_event : + main_node:Dal_node.t -> + ?other_peer_id:string -> + other_node:Dal_node.t -> + is_trusted:bool -> + unit -> + unit Lwt.t + + (** Connect [dal_node1] and [dal_node2] using the bootstrap peer mechanism. + [dal_node2] will use [dal_node1] as a bootstrap peer. + For this to work, [dal_node1] must already be running. + If [init_config] (false by default) is set to true, [Dal_node.init_config] + will be performed for [dal_node2] with [dal_node1] as peer *) + val connect_nodes_via_p2p : + ?init_config:bool -> Dal_node.t -> Dal_node.t -> unit Lwt.t end module RPC : sig diff --git a/tezt/tests/cloud/dal.ml b/tezt/tests/cloud/dal.ml index 3ff42ecf207f..0f2d1e597dcc 100644 --- a/tezt/tests/cloud/dal.ml +++ b/tezt/tests/cloud/dal.ml @@ -985,38 +985,6 @@ let init ~(configuration : configuration) cloud next_agent = disconnection_state; } -let wait_for_gossipsub_worker_event ~name dal_node lambda = - Dal_node.wait_for dal_node (sf "gossipsub_worker_event-%s.v0" name) lambda - -let check_expected expected found = if expected <> found then None else Some () - -let ( let*?? 
) a b = Option.bind a b - -let check_new_connection_event ~main_node ~other_node ~is_trusted = - let id = Dal_node.read_identity other_node in - wait_for_gossipsub_worker_event ~name:"new_connection" main_node (fun event -> - let*?? () = check_expected id JSON.(event |-> "peer" |> as_string) in - check_expected is_trusted JSON.(event |-> "trusted" |> as_bool)) - -(** Connect [dal_node1] and [dal_node2] using the bootstrap peer mechanism. - [dal_node2] will use [dal_node1] as a bootstrap peer. - - For this to work, [dal_node1] must already be running. *) -let connect_nodes_via_p2p dal_node1 dal_node2 = - (* We ensure that [dal_node1] connects to [dal_node2]. *) - let conn_ev_in_node1 = - check_new_connection_event - ~main_node:dal_node1 - ~other_node:dal_node2 - ~is_trusted:false - in - let* () = Dal_node.run dal_node2 in - Log.info - "Node %s started. Waiting for connection with node %s" - (Dal_node.name dal_node2) - (Dal_node.name dal_node1) ; - conn_ev_in_node1 - let on_new_level t level = let node = t.bootstrap.node in let client = t.bootstrap.client in @@ -1049,7 +1017,9 @@ let on_new_level t level = let baker_to_reconnect = (List.nth t.bakers (b mod nb_bakers)).dal_node in - connect_nodes_via_p2p t.bootstrap.dal_node baker_to_reconnect) + Dal_common.Helpers.connect_nodes_via_p2p + t.bootstrap.dal_node + baker_to_reconnect) in Lwt.return {t with disconnection_state = Some disconnection_state} diff --git a/tezt/tests/dal.ml b/tezt/tests/dal.ml index 746d92570c48..bc00b30fbc0c 100644 --- a/tezt/tests/dal.ml +++ b/tezt/tests/dal.ml @@ -3214,43 +3214,6 @@ let check_expected expected found = if expected <> found then None else Some () let ( let*?? ) a b = Option.bind a b -(** Wait for a connection event between [main_node] and - [other_node]. The optional argument [other_peer_id] can be used to - ignore the connection events which are not between these two - nodes. 
When this optional argument is given, it must be the peer - of [other_node]; this assumption is checked by this function - after the reception of the connection event. *) -let check_new_connection_event ~main_node ?other_peer_id ~other_node ~is_trusted - () = - let* peer_id = - wait_for_gossipsub_worker_event - ~name:"new_connection" - main_node - (fun event -> - let*?? peer_id = JSON.(event |-> "peer" |> as_string_opt) in - let*?? () = - check_expected is_trusted JSON.(event |-> "trusted" |> as_bool) - in - match other_peer_id with - | None -> - (* No expected peer id, event is considered valid and its peer id is returned *) - Some peer_id - | Some other_peer_id -> - if other_peer_id = peer_id then Some peer_id - else - (* A connection was received from an unexpected peer, - discard the event. *) - None) - in - let* other_node_id = Dal_node.read_identity other_node in - let () = - Check.(peer_id = other_node_id) - ~__LOC__ - Check.string - ~error_msg:"Expected a connection from the peer of id %R, got %L." - in - unit - let check_disconnection_event dal_node ~peer_id = wait_for_gossipsub_worker_event ~name:"disconnection" @@ -3502,29 +3465,6 @@ let check_message_notified_to_app_event dal_node ~number_of_shards let () = remaining := !remaining - 1 in if !remaining = 0 && all_seen () then Some () else None) -(** Connect [dal_node1] and [dal_node2] using the bootstrap peer mechanism. - [dal_node2] will use [dal_node1] as a bootstrap peer. - - For this to work, [dal_node1] must already be running. *) -let connect_nodes_via_p2p dal_node1 dal_node2 = - let* () = - Dal_node.init_config ~peers:[Dal_node.listen_addr dal_node1] dal_node2 - in - (* We ensure that [dal_node1] connects to [dal_node2]. *) - let conn_ev_in_node1 = - check_new_connection_event - ~main_node:dal_node1 - ~other_node:dal_node2 - ~is_trusted:false - () - in - let* () = Dal_node.run dal_node2 in - Log.info - "Node %s started. 
Waiting for connection with node %s" - (Dal_node.name dal_node2) - (Dal_node.name dal_node1) ; - conn_ev_in_node1 - (** This helper function makes the nodes [dal_node1] and [dal_node2] join the topics of the attester [pkh], by calling the RPC for tracking the corresponding profile. The second node calls the RPC only after receiving the Subscribe messages @@ -3635,7 +3575,12 @@ let test_dal_node_p2p_connection_and_disconnection _protocol _parameters _cryptobox node _client dal_node1 = let dal_node2 = Dal_node.create ~node () in (* Connect the nodes *) - let* () = connect_nodes_via_p2p dal_node1 dal_node2 in + let* () = + Dal_common.Helpers.connect_nodes_via_p2p + ~init_config:true + dal_node1 + dal_node2 + in let* peer_id = Dal_node.read_identity dal_node2 in (* kill dal_node2 and check "disconnection" event in node1. *) let disconn_ev_in_node1 = check_disconnection_event dal_node1 ~peer_id in @@ -3675,7 +3620,12 @@ let generic_gs_messages_exchange protocol parameters _cryptobox node client dal_node1 ~mk_dal_node2 ~expect_app_notification ~is_first_slot_attestable = let* dal_node2 = mk_dal_node2 protocol parameters in - let* () = connect_nodes_via_p2p dal_node1 dal_node2 in + let* () = + Dal_common.Helpers.connect_nodes_via_p2p + ~init_config:true + dal_node1 + dal_node2 + in let num_slots = parameters.Dal.Parameters.number_of_slots in let number_of_shards = parameters.Dal.Parameters.cryptobox.number_of_shards in @@ -3861,7 +3811,12 @@ let test_gs_prune_and_ihave protocol parameters _cryptobox node client dal_node1 let* dal_node2 = make_invalid_dal_node protocol parameters in (* Connect the nodes *) - let* () = connect_nodes_via_p2p dal_node1 dal_node2 in + let* () = + Dal_common.Helpers.connect_nodes_via_p2p + ~init_config:true + dal_node1 + dal_node2 + in let num_slots = parameters.number_of_slots in let account1 = Constant.bootstrap1 in @@ -3978,14 +3933,14 @@ let observe_nodes_connection_via_bootstrap ?(extra_nodes_to_restart = []) client let nodes = dal_node2 
:: dal_node3 :: extra_nodes_to_restart in let* () = List.map Dal_node.terminate nodes |> Lwt.join in let check_conn_event_from_2_to_3 = - check_new_connection_event + Dal_common.Helpers.check_new_connection_event ~main_node:dal_node2 ~other_node:dal_node3 ~is_trusted:false () in let check_conn_event_from_3_to_2 = - check_new_connection_event + Dal_common.Helpers.check_new_connection_event ~main_node:dal_node3 ~other_node:dal_node2 ~is_trusted:false @@ -4078,7 +4033,7 @@ let test_peers_reconnection _protocol _parameters _cryptobox node client (* Prepare reconnection events checks between node1 and node2 (resp. 3). *) let check_conn_event_from_1_to_2 = - check_new_connection_event + Dal_common.Helpers.check_new_connection_event ~main_node:dal_node1 ~other_node:dal_node2 ~is_trusted:false @@ -4086,7 +4041,7 @@ let test_peers_reconnection _protocol _parameters _cryptobox node client () in let check_conn_event_from_1_to_3 = - check_new_connection_event + Dal_common.Helpers.check_new_connection_event ~main_node:dal_node1 ~other_node:dal_node3 ~is_trusted:false @@ -4094,7 +4049,7 @@ let test_peers_reconnection _protocol _parameters _cryptobox node client () in let check_conn_event_from_2_to_3 = - check_new_connection_event + Dal_common.Helpers.check_new_connection_event ~main_node:dal_node2 ~other_node:dal_node3 ~is_trusted:false -- GitLab From 4aab1d18ee61328ce7bec3d7e6b75e8a084cacc2 Mon Sep 17 00:00:00 2001 From: Anne-Laure Date: Fri, 7 Jun 2024 15:27:30 +0200 Subject: [PATCH 11/11] Dal: split file in Tezt/Cloud --- tezt/tests/cloud/dal.ml | 585 ++------------------------------- tezt/tests/cloud/disconnect.ml | 72 ++++ tezt/tests/cloud/metrics.ml | 410 +++++++++++++++++++++++ tezt/tests/cloud/types.ml | 72 ++++ 4 files changed, 578 insertions(+), 561 deletions(-) create mode 100644 tezt/tests/cloud/disconnect.ml create mode 100644 tezt/tests/cloud/metrics.ml create mode 100644 tezt/tests/cloud/types.ml diff --git a/tezt/tests/cloud/dal.ml b/tezt/tests/cloud/dal.ml index 
0f2d1e597dcc..de07774a0f19 100644 --- a/tezt/tests/cloud/dal.ml +++ b/tezt/tests/cloud/dal.ml @@ -8,73 +8,6 @@ module Cryptobox = Dal_common.Cryptobox module Helpers = Dal_common.Helpers -module Disconnect = struct - module Map = Map.Make (struct - type t = int - - let compare = Int.compare - end) - - (* The [state] contains bakers indexes that have been disconnected associated - with the level at which they have been disconnected - Each [frequency] number of levels, a baker, chosen in a round-robin - fashion, is disconnected. - A disconnected baker reconnects after [reconnection_delay] levels. - The next baker to disconnect is stored in [next_to_disconnect]; - it is 0 when no baker has been disconnected yet *) - type t = { - disconnected_bakers : int Map.t; - frequency : int; - reconnection_delay : int; - next_to_disconnect : int; - } - - let init (frequency, reconnection_delay) = - { - disconnected_bakers = Map.empty; - frequency; - reconnection_delay; - next_to_disconnect = 0; - } - - (* When a relevant level is reached, [disconnect t level f] put the baker of - index [t.next_to_disconnect] in [t.disconnected_bakers] and applies [f] to - this baker. If it is already disconnected, the function does nothing and - returns [t] unchanged *) - let disconnect t level f = - if level mod t.frequency <> 0 then Lwt.return t - else - match Map.find_opt t.next_to_disconnect t.disconnected_bakers with - | Some _ -> - Log.info - "disconnect: all bakers have been disconnected, waiting for next \ - baker to reconnect." 
; - Lwt.return t - | None -> - let* () = f t.next_to_disconnect in - Lwt.return - { - t with - disconnected_bakers = - Map.add t.next_to_disconnect level t.disconnected_bakers; - next_to_disconnect = t.next_to_disconnect + 1; - } - - (* Applies [f] on the bakers that have been disconnected for long enough *) - let reconnect t level f = - let bakers_to_reconnect, bakers_to_keep_disconnected = - Map.partition - (fun _ disco_level -> level >= disco_level + t.reconnection_delay) - t.disconnected_bakers - in - let* () = - Map.to_seq bakers_to_reconnect - |> List.of_seq - |> Lwt_list.iter_p (fun (b, _) -> f b) - in - Lwt.return {t with disconnected_bakers = bakers_to_keep_disconnected} -end - module Cli = struct let section = Clap.section @@ -204,469 +137,6 @@ module Cli = struct () end -type configuration = { - stake : int list; - stake_machine_type : string list option; - dal_node_producer : int; - observer_slot_indices : int list; - protocol : Protocol.t; - producer_machine_type : string option; - disconnect : (int * int) option; -} - -type bootstrap = {node : Node.t; dal_node : Dal_node.t; client : Client.t} - -type baker = { - node : Node.t; - dal_node : Dal_node.t; - baker : Baker.t; - account : Account.key; - stake : int; -} - -type producer = { - node : Node.t; - dal_node : Dal_node.t; - client : Client.t; - account : Account.key; - is_ready : unit Lwt.t; -} - -type observer = {node : Node.t; dal_node : Dal_node.t; slot_index : int} - -type public_key_hash = string - -type commitment = string - -type per_level_info = { - level : int; - published_commitments : (int, commitment) Hashtbl.t; - attestations : (public_key_hash, Z.t option) Hashtbl.t; - attested_commitments : Z.t; -} - -type metrics = { - level_first_commitment_published : int option; - level_first_commitment_attested : int option; - total_published_commitments : int; - expected_published_commitments : int; - total_attested_commitments : int; - ratio_published_commitments : float; - 
ratio_attested_commitments : float; - ratio_published_commitments_last_level : float; - ratio_attested_commitments_per_baker : (public_key_hash, float) Hashtbl.t; -} - -let default_metrics = - { - level_first_commitment_published = None; - level_first_commitment_attested = None; - total_published_commitments = 0; - expected_published_commitments = 0; - total_attested_commitments = 0; - ratio_published_commitments = 0.; - ratio_attested_commitments = 0.; - ratio_published_commitments_last_level = 0.; - ratio_attested_commitments_per_baker = Hashtbl.create 0; - } - -type t = { - configuration : configuration; - cloud : Cloud.t; - bootstrap : bootstrap; - bakers : baker list; - producers : producer list; - observers : observer list; - parameters : Dal_common.Parameters.t; - infos : (int, per_level_info) Hashtbl.t; - metrics : (int, metrics) Hashtbl.t; - disconnection_state : Disconnect.t option; -} - -let pp_metrics t - { - level_first_commitment_published; - level_first_commitment_attested; - total_published_commitments; - expected_published_commitments; - total_attested_commitments; - ratio_published_commitments; - ratio_attested_commitments; - ratio_published_commitments_last_level; - ratio_attested_commitments_per_baker; - } = - (match level_first_commitment_published with - | None -> () - | Some level_first_commitment_published -> - Log.info - "First commitment published level: %d" - level_first_commitment_published) ; - (match level_first_commitment_attested with - | None -> () - | Some level_first_commitment_attested -> - Log.info - "First commitment attested level: %d" - level_first_commitment_attested) ; - Log.info "Total published commitments: %d" total_published_commitments ; - Log.info "Expected published commitments: %d" expected_published_commitments ; - Log.info "Total attested commitments: %d" total_attested_commitments ; - Log.info "Ratio published commitments: %f" ratio_published_commitments ; - Log.info "Ratio attested commitments: %f" 
ratio_attested_commitments ; - Log.info - "Ratio published commitments last level: %f" - ratio_published_commitments_last_level ; - t.bakers |> List.to_seq - |> Seq.iter (fun {account; stake; _} -> - match - Hashtbl.find_opt - ratio_attested_commitments_per_baker - account.Account.public_key_hash - with - | None -> Log.info "No ratio for %s" account.Account.public_key_hash - | Some ratio -> - Log.info - "Ratio for %s (with stake %d): %f" - account.Account.public_key_hash - stake - ratio) - -let push_metrics t - { - level_first_commitment_published = _; - level_first_commitment_attested = _; - total_published_commitments; - expected_published_commitments; - total_attested_commitments; - ratio_published_commitments; - ratio_attested_commitments; - ratio_published_commitments_last_level; - ratio_attested_commitments_per_baker; - } = - (* There are three metrics grouped by labels. *) - t.bakers |> List.to_seq - |> Seq.iter (fun {account; stake; _} -> - let name = - Format.asprintf - "%s (stake: %d)" - account.Account.public_key_hash - stake - in - let value = - match - Hashtbl.find_opt - ratio_attested_commitments_per_baker - account.Account.public_key_hash - with - | None -> 0. 
- | Some d -> d - in - Cloud.push_metric - t.cloud - ~labels:[("attester", name)] - ~name:"tezt_attested_ratio_per_baker" - (int_of_float value)) ; - Cloud.push_metric - t.cloud - ~name:"tezt_commitments_ratio" - ~labels:[("kind", "published")] - (int_of_float ratio_published_commitments) ; - Cloud.push_metric - t.cloud - ~name:"tezt_commitments_ratio" - ~labels:[("kind", "attested")] - (int_of_float ratio_attested_commitments) ; - Cloud.push_metric - t.cloud - ~name:"tezt_commitments_ratio" - ~labels:[("kind", "published_last_level")] - (int_of_float ratio_published_commitments_last_level) ; - Cloud.push_metric - t.cloud - ~name:"tezt_commitments" - ~labels:[("kind", "expected")] - expected_published_commitments ; - Cloud.push_metric - t.cloud - ~name:"tezt_commitments" - ~labels:[("kind", "published")] - total_published_commitments ; - Cloud.push_metric - t.cloud - ~name:"tezt_commitments" - ~labels:[("kind", "attested")] - total_attested_commitments - -let published_level_of_attested_level t level = - level - t.parameters.attestation_lag - -let update_level_first_commitment_published _t per_level_info metrics = - match metrics.level_first_commitment_published with - | None -> - if Hashtbl.length per_level_info.published_commitments > 0 then - Some per_level_info.level - else None - | Some l -> Some l - -let update_level_first_commitment_attested _t per_level_info metrics = - match metrics.level_first_commitment_attested with - | None -> - if Z.popcount per_level_info.attested_commitments > 0 then - Some per_level_info.level - else None - | Some l -> Some l - -let update_total_published_commitments _t per_level_info metrics = - metrics.total_published_commitments - + Hashtbl.length per_level_info.published_commitments - -let update_expected_published_commitments t metrics = - match metrics.level_first_commitment_published with - | None -> 0 - | Some _ -> - (* -1 since we are looking at level n operation submitted at the previous - level. 
*) - let producers = - min t.configuration.dal_node_producer t.parameters.number_of_slots - in - metrics.expected_published_commitments + producers - -let update_total_attested_commitments _t per_level_info metrics = - metrics.total_attested_commitments - + Z.popcount per_level_info.attested_commitments - -let update_ratio_published_commitments _t _per_level_info metrics = - if metrics.expected_published_commitments = 0 then 0. - else - float_of_int metrics.total_published_commitments - *. 100. - /. float_of_int metrics.expected_published_commitments - -let update_ratio_published_commitments_last_level t per_level_info metrics = - match metrics.level_first_commitment_published with - | None -> 0. - | Some _ -> - let producers = - min t.configuration.dal_node_producer t.parameters.number_of_slots - in - if producers = 0 then 100. - else - float_of_int (Hashtbl.length per_level_info.published_commitments) - *. 100. /. float_of_int producers - -let update_ratio_attested_commitments t per_level_info metrics = - match metrics.level_first_commitment_attested with - | None -> 0. - | Some level_first_commitment_attested -> ( - let published_level = - published_level_of_attested_level t per_level_info.level - in - match Hashtbl.find_opt t.infos published_level with - | None -> - Log.warn - "Unexpected error: The level %d is missing in the infos table" - published_level ; - 0. - | Some old_per_level_info -> - let n = Hashtbl.length old_per_level_info.published_commitments in - let weight = - per_level_info.level - level_first_commitment_attested - |> float_of_int - in - if n = 0 then metrics.ratio_attested_commitments - else - let bitset = - Z.popcount per_level_info.attested_commitments * 100 / n - |> float_of_int - in - let ratio = - ((metrics.ratio_attested_commitments *. weight) +. bitset) - /. (weight +. 1.) 
- in - ratio) - -let update_ratio_attested_commitments_per_baker t per_level_info metrics = - match metrics.level_first_commitment_attested with - | None -> Hashtbl.create 0 - | Some level_first_commitment_attested -> ( - let published_level = - published_level_of_attested_level t per_level_info.level - in - match Hashtbl.find_opt t.infos published_level with - | None -> - Log.warn - "Unexpected error: The level %d is missing in the infos table" - published_level ; - Hashtbl.create 0 - | Some old_per_level_info -> - let n = Hashtbl.length old_per_level_info.published_commitments in - let weight = - per_level_info.level - level_first_commitment_attested - |> float_of_int - in - t.bakers |> List.to_seq - |> Seq.map (fun ({account; _} : baker) -> - let bitset = - float_of_int - @@ - match - Hashtbl.find_opt - per_level_info.attestations - account.Account.public_key_hash - with - | None -> (* No attestation in block *) 0 - | Some (Some z) when n = 0 -> - if z = Z.zero then (* No slot were published. *) 100 - else - Test.fail - "Wow wow wait! It seems an invariant is broken. \ - Either on the test side, or on the DAL node side" - | Some (Some z) -> - (* Attestation with DAL payload *) - if n = 0 then 100 else Z.popcount z * 100 / n - | Some None -> - (* Attestation without DAL payload: no DAL rights. *) 100 - in - let old_ratio = - match - Hashtbl.find_opt - metrics.ratio_attested_commitments_per_baker - account.Account.public_key_hash - with - | None -> 0. - | Some ratio -> ratio - in - if n = 0 then (account.Account.public_key_hash, old_ratio) - else - ( account.Account.public_key_hash, - ((old_ratio *. weight) +. bitset) /. (weight +. 1.) 
)) - |> Hashtbl.of_seq) - -let get_metrics t infos_per_level metrics = - let level_first_commitment_published = - update_level_first_commitment_published t infos_per_level metrics - in - let level_first_commitment_attested = - update_level_first_commitment_attested t infos_per_level metrics - in - (* Metrics below depends on the new value for the metrics above. *) - let metrics = - { - metrics with - level_first_commitment_attested; - level_first_commitment_published; - } - in - let total_published_commitments = - update_total_published_commitments t infos_per_level metrics - in - let expected_published_commitments = - update_expected_published_commitments t metrics - in - let ratio_published_commitments_last_level = - update_ratio_published_commitments_last_level t infos_per_level metrics - in - let total_attested_commitments = - update_total_attested_commitments t infos_per_level metrics - in - (* Metrics below depends on the new value for the metrics above. *) - let metrics = - { - metrics with - level_first_commitment_attested; - level_first_commitment_published; - total_published_commitments; - expected_published_commitments; - total_attested_commitments; - ratio_published_commitments_last_level; - } - in - let ratio_published_commitments = - update_ratio_published_commitments t infos_per_level metrics - in - let ratio_attested_commitments = - update_ratio_attested_commitments t infos_per_level metrics - in - let ratio_attested_commitments_per_baker = - update_ratio_attested_commitments_per_baker t infos_per_level metrics - in - { - level_first_commitment_published; - level_first_commitment_attested; - total_published_commitments; - expected_published_commitments; - total_attested_commitments; - ratio_published_commitments; - ratio_attested_commitments; - ratio_published_commitments_last_level; - ratio_attested_commitments_per_baker; - } - -let get_infos_per_level client ~level = - let block = string_of_int level in - let* header = - Client.RPC.call client @@ 
RPC.get_chain_block_header ~block () - in - let* metadata = - Client.RPC.call client @@ RPC.get_chain_block_metadata_raw ~block () - in - let* operations = - Client.RPC.call client @@ RPC.get_chain_block_operations ~block () - in - let level = JSON.(header |-> "level" |> as_int) in - let attested_commitments = - JSON.(metadata |-> "dal_attestation" |> as_string |> Z.of_string) - in - let manager_operations = JSON.(operations |=> 3 |> as_list) in - let is_published_commitment operation = - JSON.( - operation |-> "contents" |=> 0 |-> "kind" |> as_string - = "dal_publish_commitment") - in - let get_commitment operation = - JSON.( - operation |-> "contents" |=> 0 |-> "slot_header" |-> "commitment" - |> as_string) - in - let get_slot_index operation = - JSON.( - operation |-> "contents" |=> 0 |-> "slot_header" |-> "slot_index" - |> as_int) - in - let published_commitments = - manager_operations |> List.to_seq - |> Seq.filter is_published_commitment - |> Seq.map (fun operation -> - (get_slot_index operation, get_commitment operation)) - |> Hashtbl.of_seq - in - let consensus_operations = JSON.(operations |=> 0 |> as_list) in - let is_dal_attestation operation = - JSON.( - operation |-> "contents" |=> 0 |-> "kind" |> as_string - = "attestation_with_dal") - in - let get_public_key_hash operation = - JSON.( - operation |-> "contents" |=> 0 |-> "metadata" |-> "delegate" |> as_string) - in - let get_dal_attestation operation = - JSON.( - operation |-> "contents" |=> 0 |-> "dal_attestation" |> as_string - |> Z.of_string |> Option.some) - in - let attestations = - consensus_operations |> List.to_seq - |> Seq.map (fun operation -> - let public_key_hash = get_public_key_hash operation in - let dal_attestation = - if is_dal_attestation operation then get_dal_attestation operation - else None - in - (public_key_hash, dal_attestation)) - |> Hashtbl.of_seq - in - Lwt.return {level; published_commitments; attestations; attested_commitments} - let add_source cloud agent ~job_name node 
dal_node = let agent_name = Agent.name agent in let node_metric_target = @@ -690,7 +160,7 @@ let add_source cloud agent ~job_name node dal_node = ~job_name [node_metric_target; dal_node_metric_target] -let init_bootstrap cloud (configuration : configuration) agent = +let init_bootstrap cloud (configuration : Types.configuration) agent = let* bootstrap_node = Node.Agent.create ~name:"bootstrap-node" agent in let* dal_bootstrap_node = Dal_node.Agent.create ~name:"bootstrap-dal-node" agent ~node:bootstrap_node @@ -763,12 +233,12 @@ let init_bootstrap cloud (configuration : configuration) agent = bootstrap_node dal_bootstrap_node in - let (bootstrap : bootstrap) = + let (bootstrap : Types.bootstrap) = {node = bootstrap_node; dal_node = dal_bootstrap_node; client} in Lwt.return (bootstrap, baker_accounts, producer_accounts) -let init_baker cloud (configuration : configuration) ~bootstrap_node +let init_baker cloud (configuration : Types.configuration) ~bootstrap_node ~dal_bootstrap_node account i agent = let stake = List.nth configuration.stake i in let* node = @@ -820,7 +290,7 @@ let init_baker cloud (configuration : configuration) ~bootstrap_node node dal_node in - Lwt.return {node; dal_node; baker; account; stake} + Lwt.return Types.{node; dal_node; baker; account; stake} let init_producer cloud ~bootstrap_node ~dal_bootstrap_node ~number_of_slots account i agent = @@ -866,7 +336,7 @@ let init_producer cloud ~bootstrap_node ~dal_bootstrap_node ~number_of_slots (* We do not wait on the promise because loading the SRS takes some time. Instead we will publish commitments only once this promise is fulfilled. 
*) let is_ready = Dal_node.run ~event_level:`Notice dal_node in - Lwt.return {client; node; dal_node; account; is_ready} + Lwt.return Types.{client; node; dal_node; account; is_ready} let init_observer cloud ~bootstrap_node ~dal_bootstrap_node ~slot_index i agent = @@ -899,9 +369,9 @@ let init_observer cloud ~bootstrap_node ~dal_bootstrap_node ~slot_index i agent dal_node in let* () = Dal_node.run ~event_level:`Notice dal_node in - Lwt.return {node; dal_node; slot_index} + Lwt.return Types.{node; dal_node; slot_index} -let init ~(configuration : configuration) cloud next_agent = +let init ~(configuration : Types.configuration) cloud next_agent = let* bootstrap_agent = next_agent ~name:"bootstrap" in let* attesters_agents = List.init (List.length configuration.stake) (fun i -> @@ -967,13 +437,13 @@ let init ~(configuration : configuration) cloud next_agent = in let infos = Hashtbl.create 101 in let metrics = Hashtbl.create 101 in - Hashtbl.replace metrics 1 default_metrics ; + Hashtbl.replace metrics 1 Metrics.default ; let disconnection_state = Option.map Disconnect.init configuration.disconnect in Lwt.return { - cloud; + Types.cloud; configuration; bootstrap; bakers; @@ -985,7 +455,7 @@ let init ~(configuration : configuration) cloud next_agent = disconnection_state; } -let on_new_level t level = +let on_new_level (t : Types.t) level = let node = t.bootstrap.node in let client = t.bootstrap.client in let* () = @@ -993,13 +463,13 @@ let on_new_level t level = Lwt.return_unit in Log.info "Start process level %d" level ; - let* infos_per_level = get_infos_per_level client ~level in + let* infos_per_level = Metrics.get_infos_per_level client ~level in Hashtbl.replace t.infos level infos_per_level ; let metrics = - get_metrics t infos_per_level (Hashtbl.find t.metrics (level - 1)) + Metrics.get_metrics t infos_per_level (Hashtbl.find t.metrics (level - 1)) in - pp_metrics t metrics ; - push_metrics t metrics ; + Metrics.pp_metrics t metrics ; + Metrics.push_metrics t 
metrics ; Hashtbl.replace t.metrics level metrics ; match t.disconnection_state with | None -> Lwt.return t @@ -1024,7 +494,7 @@ let on_new_level t level = Lwt.return {t with disconnection_state = Some disconnection_state} let produce_slot t level i = - let producer = List.nth t.producers i in + let producer = List.nth t.Types.producers i in let index = i mod t.parameters.number_of_slots in let content = Format.asprintf "%d:%d" level index @@ -1049,12 +519,12 @@ let producers_not_ready t = (* If not all the producer nodes are ready, we do not publish the commitment for the current level. Another attempt will be done at the next level. *) let producer_ready producer = - match Lwt.state producer.is_ready with + match Lwt.state producer.Types.is_ready with | Sleep -> true | Fail exn -> Lwt.reraise exn | Return () -> false in - List.for_all producer_ready t.producers + List.for_all producer_ready t.Types.producers let rec loop t level = let p = on_new_level t level in @@ -1070,21 +540,14 @@ let rec loop t level = loop t (level + 1) let configuration = - let stake = Cli.stake in - let stake_machine_type = Cli.stake_machine_type in - let dal_node_producer = Cli.producers in - let observer_slot_indices = Cli.observer_slot_indices in - let protocol = Cli.protocol in - let producer_machine_type = Cli.producer_machine_type in - let disconnect = Cli.disconnect in { - stake; - stake_machine_type; - dal_node_producer; - observer_slot_indices; - protocol; - producer_machine_type; - disconnect; + Types.stake = Cli.stake; + stake_machine_type = Cli.stake_machine_type; + dal_node_producer = Cli.producers; + observer_slot_indices = Cli.observer_slot_indices; + protocol = Cli.protocol; + producer_machine_type = Cli.producer_machine_type; + disconnect = Cli.disconnect; } let benchmark () = diff --git a/tezt/tests/cloud/disconnect.ml b/tezt/tests/cloud/disconnect.ml new file mode 100644 index 000000000000..16686fc1d05f --- /dev/null +++ b/tezt/tests/cloud/disconnect.ml @@ -0,0 +1,72 @@ 
+(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* SPDX-FileCopyrightText: 2024 Nomadic Labs *) +(* *) +(*****************************************************************************) + +module Map = Map.Make (struct + type t = int + + let compare = Int.compare +end) + +(* The [state] contains bakers indexes that have been disconnected associated + with the level at which they have been disconnected + Each [frequency] number of levels, a baker, chosen in a round-robin + fashion, is disconnected. + A disconnected baker reconnects after [reconnection_delay] levels. + The next baker to disconnect is stored in [next_to_disconnect]; + it is 0 when no baker has been disconnected yet *) +type t = { + disconnected_bakers : int Map.t; + frequency : int; + reconnection_delay : int; + next_to_disconnect : int; +} + +let init (frequency, reconnection_delay) = + { + disconnected_bakers = Map.empty; + frequency; + reconnection_delay; + next_to_disconnect = 0; + } + +(* When a relevant level is reached, [disconnect t level f] put the baker of + index [t.next_to_disconnect] in [t.disconnected_bakers] and applies [f] to + this baker. If it is already disconnected, the function does nothing and + returns [t] unchanged *) +let disconnect t level f = + (* let open Lwt_syntax in *) + if level mod t.frequency <> 0 then Lwt.return t + else + match Map.find_opt t.next_to_disconnect t.disconnected_bakers with + | Some _ -> + Log.info + "disconnect: all bakers have been disconnected, waiting for next \ + baker to reconnect." 
; + Lwt.return t + | None -> + let* () = f t.next_to_disconnect in + Lwt.return + { + t with + disconnected_bakers = + Map.add t.next_to_disconnect level t.disconnected_bakers; + next_to_disconnect = t.next_to_disconnect + 1; + } + +(* Applies [f] on the bakers that have been disconnected for long enough *) +let reconnect t level f = + let bakers_to_reconnect, bakers_to_keep_disconnected = + Map.partition + (fun _ disco_level -> level >= disco_level + t.reconnection_delay) + t.disconnected_bakers + in + let* () = + Map.to_seq bakers_to_reconnect + |> List.of_seq + |> Lwt_list.iter_p (fun (b, _) -> f b) + in + Lwt.return {t with disconnected_bakers = bakers_to_keep_disconnected} diff --git a/tezt/tests/cloud/metrics.ml b/tezt/tests/cloud/metrics.ml new file mode 100644 index 000000000000..e85c5179a2f3 --- /dev/null +++ b/tezt/tests/cloud/metrics.ml @@ -0,0 +1,410 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* SPDX-FileCopyrightText: 2024 Nomadic Labs *) +(* *) +(*****************************************************************************) + +let default = + Types. + { + level_first_commitment_published = None; + level_first_commitment_attested = None; + total_published_commitments = 0; + expected_published_commitments = 0; + total_attested_commitments = 0; + ratio_published_commitments = 0.; + ratio_attested_commitments = 0.; + ratio_published_commitments_last_level = 0.; + ratio_attested_commitments_per_baker = Hashtbl.create 0; + } + +let pp_metrics t + Types. 
+ { + level_first_commitment_published; + level_first_commitment_attested; + total_published_commitments; + expected_published_commitments; + total_attested_commitments; + ratio_published_commitments; + ratio_attested_commitments; + ratio_published_commitments_last_level; + ratio_attested_commitments_per_baker; + } = + (match level_first_commitment_published with + | None -> () + | Some level_first_commitment_published -> + Log.info + "First commitment published level: %d" + level_first_commitment_published) ; + (match level_first_commitment_attested with + | None -> () + | Some level_first_commitment_attested -> + Log.info + "First commitment attested level: %d" + level_first_commitment_attested) ; + Log.info "Total published commitments: %d" total_published_commitments ; + Log.info "Expected published commitments: %d" expected_published_commitments ; + Log.info "Total attested commitments: %d" total_attested_commitments ; + Log.info "Ratio published commitments: %f" ratio_published_commitments ; + Log.info "Ratio attested commitments: %f" ratio_attested_commitments ; + Log.info + "Ratio published commitments last level: %f" + ratio_published_commitments_last_level ; + t.Types.bakers + |> List.iter (fun Types.{account; stake; _} -> + match + Hashtbl.find_opt + ratio_attested_commitments_per_baker + account.Account.public_key_hash + with + | None -> Log.info "No ratio for %s" account.Account.public_key_hash + | Some ratio -> + Log.info + "Ratio for %s (with stake %d): %f" + account.Account.public_key_hash + stake + ratio) + +let push_metrics (t : Types.t) + Types. + { + level_first_commitment_published = _; + level_first_commitment_attested = _; + total_published_commitments; + expected_published_commitments; + total_attested_commitments; + ratio_published_commitments; + ratio_attested_commitments; + ratio_published_commitments_last_level; + ratio_attested_commitments_per_baker; + } = + (* There are three metrics grouped by labels. 
*) + t.bakers + |> List.iter (fun Types.{account; stake; _} -> + let name = + Format.asprintf + "%s (stake: %d)" + account.Account.public_key_hash + stake + in + let value = + match + Hashtbl.find_opt + ratio_attested_commitments_per_baker + account.Account.public_key_hash + with + | None -> 0. + | Some d -> d + in + Cloud.push_metric + t.cloud + ~labels:[("attester", name)] + ~name:"tezt_attested_ratio_per_baker" + (int_of_float value)) ; + Cloud.push_metric + t.cloud + ~name:"tezt_commitments_ratio" + ~labels:[("kind", "published")] + (int_of_float ratio_published_commitments) ; + Cloud.push_metric + t.cloud + ~name:"tezt_commitments_ratio" + ~labels:[("kind", "attested")] + (int_of_float ratio_attested_commitments) ; + Cloud.push_metric + t.cloud + ~name:"tezt_commitments_ratio" + ~labels:[("kind", "published_last_level")] + (int_of_float ratio_published_commitments_last_level) ; + Cloud.push_metric + t.cloud + ~name:"tezt_commitments" + ~labels:[("kind", "expected")] + expected_published_commitments ; + Cloud.push_metric + t.cloud + ~name:"tezt_commitments" + ~labels:[("kind", "published")] + total_published_commitments ; + Cloud.push_metric + t.cloud + ~name:"tezt_commitments" + ~labels:[("kind", "attested")] + total_attested_commitments + +let published_level_of_attested_level (t : Types.t) level = + level - t.parameters.attestation_lag + +let update_level_first_commitment_published _t per_level_info metrics = + match metrics.Types.level_first_commitment_published with + | None -> + if Hashtbl.length per_level_info.Types.published_commitments > 0 then + Some per_level_info.level + else None + | Some l -> Some l + +let update_level_first_commitment_attested _t per_level_info metrics = + match metrics.Types.level_first_commitment_attested with + | None -> + if Z.popcount per_level_info.Types.attested_commitments > 0 then + Some per_level_info.level + else None + | Some l -> Some l + +let update_total_published_commitments _t per_level_info metrics = + 
metrics.Types.total_published_commitments + + Hashtbl.length per_level_info.Types.published_commitments + +let update_expected_published_commitments (t : Types.t) metrics = + match metrics.Types.level_first_commitment_published with + | None -> 0 + | Some _ -> + (* -1 since we are looking at level n operation submitted at the previous + level. *) + let producers = + min t.configuration.dal_node_producer t.parameters.number_of_slots + in + metrics.expected_published_commitments + producers + +let update_total_attested_commitments _t per_level_info metrics = + metrics.Types.total_attested_commitments + + Z.popcount per_level_info.Types.attested_commitments + +let update_ratio_published_commitments _t _per_level_info metrics = + if metrics.Types.expected_published_commitments = 0 then 0. + else + float_of_int metrics.total_published_commitments + *. 100. + /. float_of_int metrics.expected_published_commitments + +let update_ratio_published_commitments_last_level (t : Types.t) per_level_info + metrics = + match metrics.Types.level_first_commitment_published with + | None -> 0. + | Some _ -> + let producers = + min t.configuration.dal_node_producer t.parameters.number_of_slots + in + if producers = 0 then 100. + else + float_of_int (Hashtbl.length per_level_info.Types.published_commitments) + *. 100. /. float_of_int producers + +let update_ratio_attested_commitments (t : Types.t) per_level_info metrics = + match metrics.Types.level_first_commitment_attested with + | None -> 0. + | Some level_first_commitment_attested -> ( + let published_level = + published_level_of_attested_level t per_level_info.Types.level + in + match Hashtbl.find_opt t.infos published_level with + | None -> + Log.warn + "Unexpected error: The level %d is missing in the infos table" + published_level ; + 0. 
+ | Some old_per_level_info -> + let n = Hashtbl.length old_per_level_info.published_commitments in + let weight = + per_level_info.Types.level - level_first_commitment_attested + |> float_of_int + in + if n = 0 then metrics.ratio_attested_commitments + else + let bitset = + Z.popcount per_level_info.attested_commitments * 100 / n + |> float_of_int + in + let ratio = + ((metrics.ratio_attested_commitments *. weight) +. bitset) + /. (weight +. 1.) + in + ratio) + +let update_ratio_attested_commitments_per_baker (t : Types.t) per_level_info + metrics = + match metrics.Types.level_first_commitment_attested with + | None -> Hashtbl.create 0 + | Some level_first_commitment_attested -> ( + let published_level = + published_level_of_attested_level t per_level_info.Types.level + in + match Hashtbl.find_opt t.infos published_level with + | None -> + Log.warn + "Unexpected error: The level %d is missing in the infos table" + published_level ; + Hashtbl.create 0 + | Some old_per_level_info -> + let n = Hashtbl.length old_per_level_info.published_commitments in + let weight = + per_level_info.level - level_first_commitment_attested + |> float_of_int + in + t.bakers |> List.to_seq + |> Seq.map (fun ({account; _} : Types.baker) -> + let bitset = + float_of_int + @@ + match + Hashtbl.find_opt + per_level_info.attestations + account.Account.public_key_hash + with + | None -> (* No attestation in block *) 0 + | Some (Some z) when n = 0 -> + if z = Z.zero then (* No slot were published. *) 100 + else + Test.fail + "Wow wow wait! It seems an invariant is broken. \ + Either on the test side, or on the DAL node side" + | Some (Some z) -> + (* Attestation with DAL payload *) + if n = 0 then 100 else Z.popcount z * 100 / n + | Some None -> + (* Attestation without DAL payload: no DAL rights. *) 100 + in + let old_ratio = + match + Hashtbl.find_opt + metrics.ratio_attested_commitments_per_baker + account.Account.public_key_hash + with + | None -> 0. 
+ | Some ratio -> ratio + in + if n = 0 then (account.Account.public_key_hash, old_ratio) + else + ( account.Account.public_key_hash, + ((old_ratio *. weight) +. bitset) /. (weight +. 1.) )) + |> Hashtbl.of_seq) + +let get_metrics t infos_per_level metrics = + let level_first_commitment_published = + update_level_first_commitment_published t infos_per_level metrics + in + let level_first_commitment_attested = + update_level_first_commitment_attested t infos_per_level metrics + in + (* Metrics below depends on the new value for the metrics above. *) + let metrics = + { + metrics with + level_first_commitment_attested; + level_first_commitment_published; + } + in + let total_published_commitments = + update_total_published_commitments t infos_per_level metrics + in + let expected_published_commitments = + update_expected_published_commitments t metrics + in + let ratio_published_commitments_last_level = + update_ratio_published_commitments_last_level t infos_per_level metrics + in + let total_attested_commitments = + update_total_attested_commitments t infos_per_level metrics + in + (* Metrics below depends on the new value for the metrics above. *) + let metrics = + { + metrics with + level_first_commitment_attested; + level_first_commitment_published; + total_published_commitments; + expected_published_commitments; + total_attested_commitments; + ratio_published_commitments_last_level; + } + in + let ratio_published_commitments = + update_ratio_published_commitments t infos_per_level metrics + in + let ratio_attested_commitments = + update_ratio_attested_commitments t infos_per_level metrics + in + let ratio_attested_commitments_per_baker = + update_ratio_attested_commitments_per_baker t infos_per_level metrics + in + Types. 
+ { + level_first_commitment_published; + level_first_commitment_attested; + total_published_commitments; + expected_published_commitments; + total_attested_commitments; + ratio_published_commitments; + ratio_attested_commitments; + ratio_published_commitments_last_level; + ratio_attested_commitments_per_baker; + } + +let get_infos_per_level client ~level = + let block = string_of_int level in + let* header = + Client.RPC.call client @@ RPC.get_chain_block_header ~block () + in + let* metadata = + Client.RPC.call client @@ RPC.get_chain_block_metadata_raw ~block () + in + let* operations = + Client.RPC.call client @@ RPC.get_chain_block_operations ~block () + in + let level = JSON.(header |-> "level" |> as_int) in + let attested_commitments = + JSON.(metadata |-> "dal_attestation" |> as_string |> Z.of_string) + in + let manager_operations = JSON.(operations |=> 3 |> as_list) in + let is_published_commitment operation = + JSON.( + operation |-> "contents" |=> 0 |-> "kind" |> as_string + = "dal_publish_commitment") + in + let get_commitment operation = + JSON.( + operation |-> "contents" |=> 0 |-> "slot_header" |-> "commitment" + |> as_string) + in + let get_slot_index operation = + JSON.( + operation |-> "contents" |=> 0 |-> "slot_header" |-> "slot_index" + |> as_int) + in + let published_commitments = + manager_operations |> List.to_seq + |> Seq.filter is_published_commitment + |> Seq.map (fun operation -> + (get_slot_index operation, get_commitment operation)) + |> Hashtbl.of_seq + in + let consensus_operations = JSON.(operations |=> 0 |> as_list) in + let is_dal_attestation operation = + JSON.( + operation |-> "contents" |=> 0 |-> "kind" |> as_string + = "attestation_with_dal") + in + let get_public_key_hash operation = + JSON.( + operation |-> "contents" |=> 0 |-> "metadata" |-> "delegate" |> as_string) + in + let get_dal_attestation operation = + JSON.( + operation |-> "contents" |=> 0 |-> "dal_attestation" |> as_string + |> Z.of_string |> Option.some) + in + 
let attestations = + consensus_operations |> List.to_seq + |> Seq.map (fun operation -> + let public_key_hash = get_public_key_hash operation in + let dal_attestation = + if is_dal_attestation operation then get_dal_attestation operation + else None + in + (public_key_hash, dal_attestation)) + |> Hashtbl.of_seq + in + Lwt.return + Types.{level; published_commitments; attestations; attested_commitments} diff --git a/tezt/tests/cloud/types.ml b/tezt/tests/cloud/types.ml new file mode 100644 index 000000000000..b517097175ad --- /dev/null +++ b/tezt/tests/cloud/types.ml @@ -0,0 +1,72 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* SPDX-FileCopyrightText: 2024 Nomadic Labs *) +(* *) +(*****************************************************************************) + +type configuration = { + stake : int list; + stake_machine_type : string list option; + dal_node_producer : int; + observer_slot_indices : int list; + protocol : Protocol.t; + producer_machine_type : string option; + disconnect : (int * int) option; +} + +type bootstrap = {node : Node.t; dal_node : Dal_node.t; client : Client.t} + +type baker = { + node : Node.t; + dal_node : Dal_node.t; + baker : Baker.t; + account : Account.key; + stake : int; +} + +type producer = { + node : Node.t; + dal_node : Dal_node.t; + client : Client.t; + account : Account.key; + is_ready : unit Lwt.t; +} + +type observer = {node : Node.t; dal_node : Dal_node.t; slot_index : int} + +type public_key_hash = string + +type commitment = string + +type per_level_info = { + level : int; + published_commitments : (int, commitment) Hashtbl.t; + attestations : (public_key_hash, Z.t option) Hashtbl.t; + attested_commitments : Z.t; +} + +type metrics = { + level_first_commitment_published : int option; + level_first_commitment_attested : int option; + total_published_commitments : int; + expected_published_commitments : int; + total_attested_commitments : int; 
+ ratio_published_commitments : float; + ratio_attested_commitments : float; + ratio_published_commitments_last_level : float; + ratio_attested_commitments_per_baker : (public_key_hash, float) Hashtbl.t; +} + +type t = { + configuration : configuration; + cloud : Cloud.t; + bootstrap : bootstrap; + bakers : baker list; + producers : producer list; + observers : observer list; + parameters : Dal_common.Parameters.t; + infos : (int, per_level_info) Hashtbl.t; + metrics : (int, metrics) Hashtbl.t; + disconnection_state : Disconnect.t option; +} -- GitLab