From da494644b5861328f9a06f0c577f260b234096c8 Mon Sep 17 00:00:00 2001 From: Eugen Zalinescu Date: Tue, 6 Aug 2024 15:10:30 +0200 Subject: [PATCH 1/5] DAL/Node: move `last_processed_level` store to `Store.t` --- src/bin_dal_node/daemon.ml | 23 +++++++++++------------ src/bin_dal_node/node_context.ml | 6 +----- src/bin_dal_node/node_context.mli | 5 ----- src/bin_dal_node/store.ml | 3 +++ src/bin_dal_node/store.mli | 2 ++ 5 files changed, 17 insertions(+), 22 deletions(-) diff --git a/src/bin_dal_node/daemon.ml b/src/bin_dal_node/daemon.ml index a9bc34445677..366075efe2cd 100644 --- a/src/bin_dal_node/daemon.ml +++ b/src/bin_dal_node/daemon.ml @@ -402,6 +402,7 @@ module Handler = struct let process_block ctxt cctxt proto_parameters finalized_shell_header = let open Lwt_result_syntax in + let store = Node_context.get_store ctxt in let block_level = finalized_shell_header.Block_header.level in let block = `Level block_level in let pred_level = Int32.pred block_level in @@ -431,7 +432,7 @@ module Handler = struct ~number_of_slots:proto_parameters.Dal_plugin.number_of_slots ~block_level slot_headers - (Node_context.get_store ctxt) + store else return_unit in let* () = @@ -451,7 +452,7 @@ module Handler = struct Slot_manager.publish_slot_data ~level_committee:(Node_context.fetch_committee ctxt) ~slot_size:proto_parameters.cryptobox_parameters.slot_size - (Node_context.get_store ctxt) + store (Node_context.get_gs_worker ctxt) proto_parameters commitment @@ -466,7 +467,7 @@ module Handler = struct ~attestation_lag:proto_parameters.attestation_lag ~number_of_slots:proto_parameters.number_of_slots (Plugin.is_attested attested_slots) - (Node_context.get_store ctxt) + store in let*! () = remove_unattested_slots_and_shards @@ -488,7 +489,7 @@ module Handler = struct in (* This should be done at the end of the function. *) Last_processed_level.save_last_processed_level - (Node_context.get_last_processed_level_store ctxt) + store.last_processed_level ~level:block_level let rec try_process_block ~retries ctxt cctxt proto_parameters @@ -834,7 +835,7 @@ let clean_up_store ctxt cctxt ~last_processed_level level (module Plugin : Dal_plugin.T with type block_info = Plugin.block_info) in - let lpl_store = Node_context.get_last_processed_level_store ctxt in + let store = Node_context.get_store ctxt in let supports_refutations = Handler.supports_refutations ctxt in (* [target_level] identifies the level wrt to head level at which we want to start the P2P and process blocks as usual. *) @@ -873,7 +874,9 @@ let clean_up_store ctxt cctxt ~last_processed_level else return_unit in let* () = - Last_processed_level.save_last_processed_level lpl_store ~level + Last_processed_level.save_last_processed_level + store.last_processed_level + ~level in clean_up_from_level (Int32.succ level) in @@ -1001,11 +1004,8 @@ let run ~data_dir ~configuration_override = in let* store = Store.init config in let*! metrics_server = Metrics.launch config.metrics_addr in - let* last_processed_level_store = - Last_processed_level.init ~root_dir:(Configuration_file.store_path config) - in let* last_processed_level = - Last_processed_level.load_last_processed_level last_processed_level_store + Last_processed_level.load_last_processed_level store.last_processed_level in (* First wait for the L1 node to be bootstrapped. 
*) let* () = wait_for_l1_bootstrapped cctxt in @@ -1055,7 +1055,6 @@ let run ~data_dir ~configuration_override = transport_layer cctxt metrics_server - last_processed_level_store in let* () = match last_processed_level with @@ -1067,7 +1066,7 @@ let run ~data_dir ~configuration_override = (* We reload the last processed level because [clean_up_store] has likely modified it. *) let* last_notified_level = - Last_processed_level.load_last_processed_level last_processed_level_store + Last_processed_level.load_last_processed_level store.last_processed_level in let open Constants in let*! crawler = diff --git a/src/bin_dal_node/node_context.ml b/src/bin_dal_node/node_context.ml index 2550f528061b..624783f5fd50 100644 --- a/src/bin_dal_node/node_context.ml +++ b/src/bin_dal_node/node_context.ml @@ -41,12 +41,11 @@ type t = { transport_layer : Gossipsub.Transport_layer.t; mutable profile_ctxt : Profile_manager.t; metrics_server : Metrics.t; - last_processed_level_store : Last_processed_level.t; } let init config profile_ctxt cryptobox shards_proofs_precomputation proto_parameters proto_plugins store gs_worker transport_layer cctxt - metrics_server last_processed_level_store = + metrics_server = let neighbors_cctxts = List.map (fun Configuration_file.{addr; port} -> @@ -73,7 +72,6 @@ let init config profile_ctxt cryptobox shards_proofs_precomputation transport_layer; profile_ctxt; metrics_server; - last_processed_level_store; } let may_reconstruct ~reconstruct slot_id t = @@ -161,8 +159,6 @@ let get_proto_parameters ctxt = ctxt.proto_parameters let get_shards_proofs_precomputation ctxt = ctxt.shards_proofs_precomputation -let get_last_processed_level_store ctxt = ctxt.last_processed_level_store - let get_store ctxt = ctxt.store let get_gs_worker ctxt = ctxt.gs_worker diff --git a/src/bin_dal_node/node_context.mli b/src/bin_dal_node/node_context.mli index b9fef8009397..a0ebc7ebc6ca 100644 --- a/src/bin_dal_node/node_context.mli +++ b/src/bin_dal_node/node_context.mli @@ -41,7 +41,6 @@ val init : Gossipsub.Transport_layer.t -> Tezos_rpc.Context.generic -> Metrics.t -> - Last_processed_level.t -> t (** Returns all the registered plugins *) @@ -107,10 +106,6 @@ val get_shards_proofs_precomputation : (** [get_store ctxt] returns the dal node store. *) val get_store : t -> Store.t -(** [get_last_processed_level_store ctxt] returns the last processed level - store. *) -val get_last_processed_level_store : t -> Last_processed_level.t - (** [get_gs_worker ctxt] returns the Gossipsub worker state. *) val get_gs_worker : t -> Gossipsub.Worker.t diff --git a/src/bin_dal_node/store.ml b/src/bin_dal_node/store.ml index a287111912d0..4fffdd3663c0 100644 --- a/src/bin_dal_node/store.ml +++ b/src/bin_dal_node/store.ml @@ -415,6 +415,7 @@ type t = { Commitment_indexed_cache.t; (* The length of the array is the number of shards per slot *) finalized_commitments : Slot_id_cache.t; + last_processed_level : Last_processed_level.t; } let cache_entry node_store commitment slot shares shard_proofs = @@ -566,6 +567,7 @@ let init config = let* shards = Shards.init base_dir Stores_dirs.shard in let* slots = Slots.init base_dir Stores_dirs.slot in let* skip_list_cells = init_skip_list_cells_store base_dir in + let* last_processed_level = Last_processed_level.init ~root_dir:base_dir in let*! 
() = Event.(emit store_is_ready ()) in return { @@ -576,6 +578,7 @@ let init config = cache = Commitment_indexed_cache.create Constants.cache_size; finalized_commitments = Slot_id_cache.create ~capacity:Constants.slot_id_cache_size; + last_processed_level; } let add_slot_headers ~number_of_slots ~block_level slot_headers t = diff --git a/src/bin_dal_node/store.mli b/src/bin_dal_node/store.mli index f61e93b9dfec..a4f34b6a4ac7 100644 --- a/src/bin_dal_node/store.mli +++ b/src/bin_dal_node/store.mli @@ -140,6 +140,8 @@ type t = private { (** Cache of commitments indexed by level and then by slot id. The maximum number of levels is given by {!Constants.slot_id_cache_size}. No more than [number_of_slots] commitments can be stored per level. *) + last_processed_level : Last_processed_level.t; + (** Last processed level store *) } (** [cache_entry store commitment entry] adds or replace an entry to -- GitLab From 83bbcef4348f8995d0cd8c4ae924901ca16d6c34 Mon Sep 17 00:00:00 2001 From: Eugen Zalinescu Date: Tue, 6 Aug 2024 16:03:12 +0200 Subject: [PATCH 2/5] DAL/Node: introduce new module Single_value_store --- src/bin_dal_node/daemon.ml | 12 ++--- src/bin_dal_node/last_processed_level.ml | 36 -------------- src/bin_dal_node/last_processed_level.mli | 30 ------------ src/bin_dal_node/single_value_store.ml | 57 +++++++++++++++++++++++ src/bin_dal_node/single_value_store.mli | 36 ++++++++++++++ src/bin_dal_node/store.ml | 8 ++++ src/bin_dal_node/store.mli | 2 + 7 files changed, 107 insertions(+), 74 deletions(-) delete mode 100644 src/bin_dal_node/last_processed_level.ml delete mode 100644 src/bin_dal_node/last_processed_level.mli create mode 100644 src/bin_dal_node/single_value_store.ml create mode 100644 src/bin_dal_node/single_value_store.mli diff --git a/src/bin_dal_node/daemon.ml b/src/bin_dal_node/daemon.ml index 366075efe2cd..adf3af1a926c 100644 --- a/src/bin_dal_node/daemon.ml +++ b/src/bin_dal_node/daemon.ml @@ -488,9 +488,7 @@ module Handler = struct Event.(emit layer1_node_final_block (block_level, block_round)) in (* This should be done at the end of the function. *) - Last_processed_level.save_last_processed_level - store.last_processed_level - ~level:block_level + Store.Last_processed_level.save store.last_processed_level block_level let rec try_process_block ~retries ctxt cctxt proto_parameters finalized_shell_header = @@ -874,9 +872,7 @@ let clean_up_store ctxt cctxt ~last_processed_level else return_unit in let* () = - Last_processed_level.save_last_processed_level - store.last_processed_level - ~level + Store.Last_processed_level.save store.last_processed_level level in clean_up_from_level (Int32.succ level) in @@ -1005,7 +1001,7 @@ let run ~data_dir ~configuration_override = let* store = Store.init config in let*! metrics_server = Metrics.launch config.metrics_addr in let* last_processed_level = - Last_processed_level.load_last_processed_level store.last_processed_level + Store.Last_processed_level.load store.last_processed_level in (* First wait for the L1 node to be bootstrapped. *) let* () = wait_for_l1_bootstrapped cctxt in @@ -1066,7 +1062,7 @@ let run ~data_dir ~configuration_override = (* We reload the last processed level because [clean_up_store] has likely modified it. *) let* last_notified_level = - Last_processed_level.load_last_processed_level store.last_processed_level + Store.Last_processed_level.load store.last_processed_level in let open Constants in let*! 
crawler = diff --git a/src/bin_dal_node/last_processed_level.ml b/src/bin_dal_node/last_processed_level.ml deleted file mode 100644 index fea3c30f538b..000000000000 --- a/src/bin_dal_node/last_processed_level.ml +++ /dev/null @@ -1,36 +0,0 @@ -(*****************************************************************************) -(* *) -(* SPDX-License-Identifier: MIT *) -(* SPDX-FileCopyrightText: 2024 Functori, *) -(* SPDX-FileCopyrightText: 2024 Nomadic Labs, *) -(* *) -(*****************************************************************************) - -let last_processed_level_filename = "last_processed_level" - -module KVS = Key_value_store - -type nonrec t = (unit, unit, int32) KVS.t - -let init ~root_dir : t tzresult Lwt.t = KVS.init ~lru_size:1 ~root_dir - -let file_layout ~root_dir () = - let filepath = Filename.concat root_dir last_processed_level_filename in - Key_value_store.layout - ~encoding:Data_encoding.int32 - ~filepath - ~eq:Stdlib.( = ) - ~index_of:(fun () -> 0) - ~number_of_keys_per_file:1 - () - -let load_last_processed_level (t : t) = - let open Lwt_result_syntax in - let* exists = KVS.value_exists t file_layout () () in - if exists then - let* res = KVS.read_value t file_layout () () in - return_some res - else return_none - -let save_last_processed_level (t : t) ~level = - KVS.write_value ~override:true t file_layout () () level diff --git a/src/bin_dal_node/last_processed_level.mli b/src/bin_dal_node/last_processed_level.mli deleted file mode 100644 index 6efc7a6b8ecd..000000000000 --- a/src/bin_dal_node/last_processed_level.mli +++ /dev/null @@ -1,30 +0,0 @@ -(*****************************************************************************) -(* *) -(* SPDX-License-Identifier: MIT *) -(* SPDX-FileCopyrightText: 2024 Functori, *) -(* SPDX-FileCopyrightText: 2024 Nomadic Labs, *) -(* *) -(*****************************************************************************) - -(** Helper functions to load/store the level of the last processed L1 block by - the crawler from/to disk. *) - -type t - -(** Initializes a KVS store at the given location to remember the last processed - level by the crawler on disk. *) -val init : root_dir:string -> t tzresult Lwt.t - -(** [load_last_processed_level t ] loads and returns the content of - [data_dir/last_processed_level.json] as an int32. If the file doesn't - exists, the result is [None]. The function returns - [Failed_to_load_last_processed_level] in the error monad in case of - failure. *) -val load_last_processed_level : t -> int32 option tzresult Lwt.t - -(** [save_last_processed_level t ~level] saves the value - of [last_processed_level] into [data_dir/last_processed_level.json], - overriding any previous value. The function returns - [Failed_to_save_last_processed_level] in the error monad in case of - failure. 
*) -val save_last_processed_level : t -> level:int32 -> unit tzresult Lwt.t diff --git a/src/bin_dal_node/single_value_store.ml b/src/bin_dal_node/single_value_store.ml new file mode 100644 index 000000000000..a64122148e3a --- /dev/null +++ b/src/bin_dal_node/single_value_store.ml @@ -0,0 +1,57 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* SPDX-FileCopyrightText: 2024 Functori, *) +(* SPDX-FileCopyrightText: 2024 Nomadic Labs, *) +(* *) +(*****************************************************************************) + +module type VALUE = sig + type t + + val name : string + + val encoding : t Data_encoding.t +end + +module type S = sig + type value + + type t + + val init : root_dir:string -> t tzresult Lwt.t + + val load : t -> value option tzresult Lwt.t + + val save : t -> value -> unit tzresult Lwt.t +end + +module Make (Value : VALUE) : S with type value = Value.t = struct + module KVS = Key_value_store + + type value = Value.t + + type nonrec t = (unit, unit, Value.t) KVS.t + + let init ~root_dir : t tzresult Lwt.t = KVS.init ~lru_size:1 ~root_dir + + let file_layout ~root_dir () = + let filepath = Filename.concat root_dir Value.name in + Key_value_store.layout + ~encoding:Value.encoding + ~filepath + ~eq:Stdlib.( = ) + ~index_of:(fun () -> 0) + ~number_of_keys_per_file:1 + () + + let load t = + let open Lwt_result_syntax in + let* exists = KVS.value_exists t file_layout () () in + if exists then + let* res = KVS.read_value t file_layout () () in + return_some res + else return_none + + let save t value = KVS.write_value ~override:true t file_layout () () value +end diff --git a/src/bin_dal_node/single_value_store.mli b/src/bin_dal_node/single_value_store.mli new file mode 100644 index 000000000000..c78b8c0e5b07 --- /dev/null +++ b/src/bin_dal_node/single_value_store.mli @@ -0,0 +1,36 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* SPDX-FileCopyrightText: 2024 Functori, *) +(* SPDX-FileCopyrightText: 2024 Nomadic Labs, *) +(* *) +(*****************************************************************************) + +(** Helper module to load/store a single value from/to disk. *) + +module type VALUE = sig + type t + + val name : string + + val encoding : t Data_encoding.t +end + +module type S = sig + type value + + type t + + (** Initializes a KVS store at the given location to remember some value. *) + val init : root_dir:string -> t tzresult Lwt.t + + (** [load t] loads the value from store [t]. If the value is not found, the + result is [None]. *) + val load : t -> value option tzresult Lwt.t + + (** [save t value] saves the [value] into the store [t], overriding + any previous value. 
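+
+      For orientation, a minimal usage sketch ([Counter] is a hypothetical
+      example; the real instantiations, such as [Last_processed_level], live
+      in [Store]):
+
+      {[
+        module Counter = Single_value_store.Make (struct
+          type t = int32
+
+          let name = "counter"
+
+          let encoding = Data_encoding.int32
+        end)
+
+        let bump counter_store =
+          let open Lwt_result_syntax in
+          let* v = Counter.load counter_store in
+          let v = Option.value v ~default:0l in
+          Counter.save counter_store (Int32.succ v)
+      ]}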
*) + val save : t -> value -> unit tzresult Lwt.t +end + +module Make : functor (Value : VALUE) -> S with type value = Value.t diff --git a/src/bin_dal_node/store.ml b/src/bin_dal_node/store.ml index 4fffdd3663c0..4e64b184eb8c 100644 --- a/src/bin_dal_node/store.ml +++ b/src/bin_dal_node/store.ml @@ -404,6 +404,14 @@ module Commitment_indexed_cache = let hash = Hashtbl.hash end) +module Last_processed_level = Single_value_store.Make (struct + type t = int32 + + let name = "last_processed_level" + + let encoding = Data_encoding.int32 +end) + (** Store context *) type t = { slot_header_statuses : Statuses.t; diff --git a/src/bin_dal_node/store.mli b/src/bin_dal_node/store.mli index a4f34b6a4ac7..b3193c638a0b 100644 --- a/src/bin_dal_node/store.mli +++ b/src/bin_dal_node/store.mli @@ -127,6 +127,8 @@ module Commitment_indexed_cache : sig val find_opt : 'a t -> commitment -> 'a option end +module Last_processed_level : Single_value_store.S with type value = int32 + type t = private { slot_header_statuses : Statuses.t; (** Statuses store *) shards : Shards.t; (** Shards store *) -- GitLab From d7210b29003a4e7367cf537531059cd64730d657 Mon Sep 17 00:00:00 2001 From: Eugen Zalinescu Date: Tue, 6 Aug 2024 16:08:34 +0200 Subject: [PATCH 3/5] DAL/Node: store the first seen head level --- src/bin_dal_node/daemon.ml | 10 +++++++++- src/bin_dal_node/store.ml | 11 +++++++++++ src/bin_dal_node/store.mli | 3 +++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/src/bin_dal_node/daemon.ml b/src/bin_dal_node/daemon.ml index adf3af1a926c..70f70db4472a 100644 --- a/src/bin_dal_node/daemon.ml +++ b/src/bin_dal_node/daemon.ml @@ -1000,13 +1000,21 @@ let run ~data_dir ~configuration_override = in let* store = Store.init config in let*! metrics_server = Metrics.launch config.metrics_addr in + let* first_seen_level = Store.First_seen_level.load store.first_seen_level in let* last_processed_level = Store.Last_processed_level.load store.last_processed_level in (* First wait for the L1 node to be bootstrapped. *) let* () = wait_for_l1_bootstrapped cctxt in (* Check the DAL node's and L1 node's history mode. *) - let* ((_, _, proto_parameters) as plugin_info) = get_head_plugin cctxt in + let* ((head_level, _, proto_parameters) as plugin_info) = + get_head_plugin cctxt + in + let* () = + match first_seen_level with + | None -> Store.First_seen_level.save store.first_seen_level head_level + | Some _ -> return_unit + in let* profile_ctxt = build_profile_context config in let*? 
() = Profile_manager.validate_slot_indexes diff --git a/src/bin_dal_node/store.ml b/src/bin_dal_node/store.ml index 4e64b184eb8c..bacb4e55f23a 100644 --- a/src/bin_dal_node/store.ml +++ b/src/bin_dal_node/store.ml @@ -412,6 +412,14 @@ module Last_processed_level = Single_value_store.Make (struct let encoding = Data_encoding.int32 end) +module First_seen_level = Single_value_store.Make (struct + type t = int32 + + let name = "first_seen_level" + + let encoding = Data_encoding.int32 +end) + (** Store context *) type t = { slot_header_statuses : Statuses.t; @@ -424,6 +432,7 @@ type t = { (* The length of the array is the number of shards per slot *) finalized_commitments : Slot_id_cache.t; last_processed_level : Last_processed_level.t; + first_seen_level : First_seen_level.t; } let cache_entry node_store commitment slot shares shard_proofs = @@ -576,6 +585,7 @@ let init config = let* slots = Slots.init base_dir Stores_dirs.slot in let* skip_list_cells = init_skip_list_cells_store base_dir in let* last_processed_level = Last_processed_level.init ~root_dir:base_dir in + let* first_seen_level = First_seen_level.init ~root_dir:base_dir in let*! () = Event.(emit store_is_ready ()) in return { @@ -587,6 +597,7 @@ let init config = finalized_commitments = Slot_id_cache.create ~capacity:Constants.slot_id_cache_size; last_processed_level; + first_seen_level; } let add_slot_headers ~number_of_slots ~block_level slot_headers t = diff --git a/src/bin_dal_node/store.mli b/src/bin_dal_node/store.mli index b3193c638a0b..e1c1f8bd9469 100644 --- a/src/bin_dal_node/store.mli +++ b/src/bin_dal_node/store.mli @@ -129,6 +129,8 @@ end module Last_processed_level : Single_value_store.S with type value = int32 +module First_seen_level : Single_value_store.S with type value = int32 + type t = private { slot_header_statuses : Statuses.t; (** Statuses store *) shards : Shards.t; (** Shards store *) @@ -144,6 +146,7 @@ type t = private { than [number_of_slots] commitments can be stored per level. *) last_processed_level : Last_processed_level.t; (** Last processed level store *) + first_seen_level : First_seen_level.t; (** First seen level store *) } (** [cache_entry store commitment entry] adds or replace an entry to -- GitLab From a01b132f6a78205b917d6fd955ecfd591c96a344 Mon Sep 17 00:00:00 2001 From: Eugen Zalinescu Date: Wed, 7 Aug 2024 07:27:57 +0200 Subject: [PATCH 4/5] DAL/Node: storage period depends on the first seen level --- src/bin_dal_node/daemon.ml | 120 +++++++++++++++++++++++++------------ 1 file changed, 83 insertions(+), 37 deletions(-) diff --git a/src/bin_dal_node/daemon.ml b/src/bin_dal_node/daemon.ml index 70f70db4472a..eb609f3e9a8e 100644 --- a/src/bin_dal_node/daemon.ml +++ b/src/bin_dal_node/daemon.ml @@ -681,9 +681,49 @@ let check_history_mode config profile_ctxt proto_parameters = else return_unit | Rolling {blocks = `Auto} | Full -> return_unit +(* We need more levels because [store_skip_list_cells level] needs the plugin + for [attestation_lag + 1] levels in the past wrt to the target [level]. Note + {!get_storage_period} refers to published levels (not attested levels). The + plus one comes from the technical details of {!store_skip_list_cells}. *) +let skip_list_offset proto_parameters = + proto_parameters.Dal_plugin.attestation_lag + 1 + +(* This function determines the storage period taking into account the node's + [first_seen_level]. 
Indeed, if the node starts for the first time, we do not
+   expect it to store skip list cells (even if it supports refutations) for the
+   entire required period of 3 months, because it will not have the slot pages
+   for that period anyway. Note that this could be further refined by taking
+   into account when the corresponding rollup was originated. *)
+let get_storage_period profile_ctxt proto_parameters head_level first_seen_level
+    =
+  let supports_refutations =
+    Profile_manager.supports_refutations profile_ctxt
+  in
+  let default_storage_period =
+    Profile_manager.get_attested_data_default_store_period
+      profile_ctxt
+      proto_parameters
+  in
+  if supports_refutations then
+    (* This deliberately does not take into account the [last_processed_level]. *)
+    let online_period =
+      match first_seen_level with
+      | None -> 0
+      | Some first_seen_level ->
+          Int32.sub head_level first_seen_level |> Int32.to_int
+    in
+    (* Even if the node was not online previously, or it was online only for a
+       few levels, we still need to store data for the minimal period, defined
+       in [get_attested_data_default_store_period]. TODO: refactor to expose
+       this value. *)
+    let max_period = max (2 * proto_parameters.attestation_lag) online_period in
+    min max_period default_storage_period
+  else default_storage_period
+
 (* This function checks that the L1 node stores enough block data for the DAL
    node to function correctly. *)
-let check_l1_history_mode profile_ctxt cctxt proto_parameters =
+let check_l1_history_mode profile_ctxt cctxt proto_parameters head_level
+    first_level =
   let open Lwt_result_syntax in
   let* l1_history_mode =
     let* l1_mode, blocks_preservation_cycles_opt =
   in
   match l1_history_mode with
   | `L1_archive -> return_unit
-  | `L1_rolling c ->
-      let b =
-        Profile_manager.get_attested_data_default_store_period
-          profile_ctxt
-          proto_parameters
-      in
-      let b =
+  | `L1_rolling l1_cycles ->
+      let dal_blocks =
+        get_storage_period profile_ctxt proto_parameters head_level first_level
+        +
         if Profile_manager.supports_refutations profile_ctxt then
-          (* We need more levels because [store_skip_list_cells level] needs the
-             plugin for [attestation_lag + 1] levels in the past wrt to the
-             target [level]. *)
-          b + proto_parameters.attestation_lag + 1
-        else b
+          skip_list_offset proto_parameters
+        else 0
       in
-      check ~dal_blocks:b ~l1_cycles:c
+      check ~dal_blocks ~l1_cycles

 let build_profile_context config =
   let open Lwt_result_syntax in
   let res =
   match res with
   | Ok loaded_profile ->
       (* The profiles from the loaded context are prioritized over the
-         profiles provided in the config file. *)
+          profiles provided in the config file. *)
       Profile_manager.merge_profiles
         ~lower_prio:config.Configuration_file.profile
         ~higher_prio:loaded_profile

 let update_and_register_profiles ctxt =

(* This function fetches the protocol plugins for the levels at which skip list
   cells need to be added. It starts by computing the oldest such level.
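   As a worked example with invented numbers: if attestation_lag = 8, the
   default storage period is 200 levels, head_level = 1000 and
   first_seen_level = Some 990, then [get_storage_period] returns
   min (max (2 * 8) (1000 - 990)) 200 = 16, so plugins are resolved starting
   from level 1000 - 16 = 984 (or from the last processed level if that is
   more recent); with refutation support, the [skip_list_offset] of 8 + 1
   levels pushes the first level back to 975 (floored at level 1).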
*) -let get_proto_plugins cctxt profile_ctxt last_processed_level +let get_proto_plugins cctxt profile_ctxt ~last_processed_level ~first_seen_level (head_level, (module Plugin : Dal_plugin.T), proto_parameters) = (* We resolve the plugins for all levels starting with [(max last_processed_level (head_level - storage_period)], or (max @@ -783,9 +817,7 @@ let get_proto_plugins cctxt profile_ctxt last_processed_level parameters and the relevant encodings do not change for now, so the head plugin could be used). *) let storage_period = - Profile_manager.get_attested_data_default_store_period - profile_ctxt - proto_parameters + get_storage_period profile_ctxt proto_parameters head_level first_seen_level in let first_level = Int32.max @@ -794,8 +826,7 @@ let get_proto_plugins cctxt profile_ctxt last_processed_level in let first_level = if Profile_manager.supports_refutations profile_ctxt then - (* See usage of the plugin in [store_skip_list_cells] *) - Int32.(sub first_level (of_int (1 + proto_parameters.attestation_lag))) + Int32.sub first_level (Int32.of_int (skip_list_offset proto_parameters)) else first_level in let first_level = Int32.(max 1l first_level) in @@ -814,7 +845,7 @@ let get_proto_plugins cctxt profile_ctxt last_processed_level and we don't detect it if this code starts running just before the migration level, and the head changes meanwhile to be above the migration level. *) -let clean_up_store ctxt cctxt ~last_processed_level +let clean_up_store ctxt cctxt ~last_processed_level ~first_seen_level (head_level, (module Plugin : Dal_plugin.T), proto_parameters) = let open Lwt_result_syntax in let store_skip_list_cells ~level = @@ -838,29 +869,27 @@ let clean_up_store ctxt cctxt ~last_processed_level (* [target_level] identifies the level wrt to head level at which we want to start the P2P and process blocks as usual. *) let target_level head_level = Int32.(sub head_level 2l) in - let first_level_for_skip_list_storage period head_level = - (* We consider that [period] refers to published levels (not attested - levels). The plus one comes from the technical details of - {store_skip_list_cells}. Note that behind this first level we do not have + let first_level_for_skip_list_storage period level = + (* Note that behind this first level we do not have the plugin. *) - Int32.( - sub - head_level - (of_int (period - (proto_parameters.Dal_plugin.attestation_lag + 1)))) + Int32.(sub level (of_int period)) in let should_store_skip_list_cells ~head_level ~level = let profile_ctxt = Node_context.get_profile_ctxt ctxt in let period = - Profile_manager.get_attested_data_default_store_period + get_storage_period profile_ctxt proto_parameters + head_level + first_seen_level + + skip_list_offset proto_parameters in supports_refutations && level >= first_level_for_skip_list_storage period head_level in let rec do_clean_up last_processed_level head_level = let last_level = target_level head_level in - let rec clean_up_from_level level = + let rec clean_up_at_level level = if level > last_level then return_unit else let*! () = @@ -874,10 +903,10 @@ let clean_up_store ctxt cctxt ~last_processed_level let* () = Store.Last_processed_level.save store.last_processed_level level in - clean_up_from_level (Int32.succ level) + clean_up_at_level (Int32.succ level) in (* Clean up from [last_processed_level] to [last_level]. 
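        (Illustrative trace with invented numbers: if last_processed_level =
        100 and head_level = 110, then last_level = 108 and levels 101 through
        108 are cleaned up; if the head has meanwhile advanced to 115, the
        recursive call handles levels 109 through 113.)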
*) - let* () = clean_up_from_level (Int32.succ last_processed_level) in + let* () = clean_up_at_level (Int32.succ last_processed_level) in (* As this iteration may be slow, the head level might have advanced in the meanwhile. *) let* header = @@ -1022,7 +1051,14 @@ let run ~data_dir ~configuration_override = ~number_of_slots:proto_parameters.number_of_slots in let* () = check_history_mode config profile_ctxt proto_parameters in - let* () = check_l1_history_mode profile_ctxt cctxt proto_parameters in + let* () = + check_l1_history_mode + profile_ctxt + cctxt + proto_parameters + head_level + first_seen_level + in (* Initialize the crypto process *) (* FIXME: https://gitlab.com/tezos/tezos/-/issues/5743 Instead of recomputing these parameters, they could be stored @@ -1044,7 +1080,12 @@ let run ~data_dir ~configuration_override = (Cryptobox.Internal_for_tests.encoded_share_size cryptobox) ; Value_size_hooks.set_number_of_slots proto_parameters.number_of_slots ; let* proto_plugins = - get_proto_plugins cctxt profile_ctxt last_processed_level plugin_info + get_proto_plugins + cctxt + profile_ctxt + ~last_processed_level + ~first_seen_level + plugin_info in let ctxt = Node_context.init @@ -1064,7 +1105,12 @@ let run ~data_dir ~configuration_override = match last_processed_level with | None -> (* there's nothing to clean up *) return_unit | Some last_processed_level -> - clean_up_store ctxt cctxt ~last_processed_level plugin_info + clean_up_store + ctxt + cctxt + ~last_processed_level + ~first_seen_level + plugin_info in let* crawler = (* We reload the last processed level because [clean_up_store] has likely -- GitLab From 3a7795c705480a3b665f1cce6abd374b7dcc4362 Mon Sep 17 00:00:00 2001 From: Eugen Zalinescu Date: Fri, 16 Aug 2024 09:13:22 +0200 Subject: [PATCH 5/5] DAL/Tests: adapt l1_history_mode --- tezt/lib_tezos/dal_common.ml | 14 +++++++++++++- tezt/lib_tezos/dal_common.mli | 18 ++++++++++++++---- tezt/tests/dal.ml | 19 +++++++++++++++++-- 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/tezt/lib_tezos/dal_common.ml b/tezt/lib_tezos/dal_common.ml index 62e224e2fb98..046f3431602c 100644 --- a/tezt/lib_tezos/dal_common.ml +++ b/tezt/lib_tezos/dal_common.ml @@ -81,7 +81,7 @@ module Parameters = struct in from_protocol_parameters json |> return - let storage_period_with_refutation_in_cycles ~proto_parameters = + let full_storage_period_with_refutation_in_cycles ~proto_parameters = let blocks_per_cycle = JSON.(proto_parameters |-> "blocks_per_cycle" |> as_int) in @@ -110,6 +110,18 @@ module Parameters = struct if blocks mod blocks_per_cycle = 0 then blocks / blocks_per_cycle else 1 + (blocks / blocks_per_cycle) + let initial_storage_period_with_refutation_in_cycles ~proto_parameters = + let blocks_per_cycle = + JSON.(proto_parameters |-> "blocks_per_cycle" |> as_int) + in + let attestation_lag = + JSON.( + proto_parameters |-> "dal_parametric" |-> "attestation_lag" |> as_int) + in + let blocks = (3 * attestation_lag) + 1 in + if blocks mod blocks_per_cycle = 0 then blocks / blocks_per_cycle + else 1 + (blocks / blocks_per_cycle) + let storage_period_without_refutation_in_cycles ~proto_parameters = let blocks_per_cycle = JSON.(proto_parameters |-> "blocks_per_cycle" |> as_int) diff --git a/tezt/lib_tezos/dal_common.mli b/tezt/lib_tezos/dal_common.mli index 1bd34012b0b9..f350938d3ac3 100644 --- a/tezt/lib_tezos/dal_common.mli +++ b/tezt/lib_tezos/dal_common.mli @@ -44,10 +44,20 @@ module Parameters : sig val from_client : Client.t -> t Lwt.t (* This function computes the period (in 
cycles) during which the node stores
     data about attested slots assuming the node supports refutations and it has
     been started sufficiently far in the past. See the functions
     [Profile_manager.get_attested_data_default_store_period] and
     [Daemon.get_storage_period] in src/bin_dal_node/. *)
  val full_storage_period_with_refutation_in_cycles :
    proto_parameters:JSON.t -> int
+
+  (* This function computes the period (in cycles) during which the node stores
+     data about attested slots assuming the node supports refutations and it
+     has just been started. See the functions
+     [Profile_manager.get_attested_data_default_store_period] and
+     [Daemon.get_storage_period] in src/bin_dal_node/. *)
+  val initial_storage_period_with_refutation_in_cycles :
+    proto_parameters:JSON.t -> int

   (* This function computes the period (in cycles) during which the node stores
      data about attested slots assuming the node does not support
diff --git a/tezt/tests/dal.ml b/tezt/tests/dal.ml
index 33bdb44d4839..89bbc8774fc1 100644
--- a/tezt/tests/dal.ml
+++ b/tezt/tests/dal.ml
@@ -262,6 +262,10 @@ let slot_size_param slot_size =
 (* Some initialization functions to start needed nodes. *)
 type l1_history_mode =
   | Default_with_refutation
+  (* to be used when the node starts for the first time *)
+  | Default_with_refutation_full
+  (* to be used when the node restarts, and it was first started more than the
+     default storage period ago *)
   | Default_without_refutation
   | Custom of Node.history_mode

 let history_mode base protocol parameter_overrides proto_parameters
       return (parameter_overrides, Node.Rolling (Some additional_cycles))
   | Default_with_refutation ->
       let cycles =
-        Dal.Parameters.storage_period_with_refutation_in_cycles
+        Dal.Parameters.initial_storage_period_with_refutation_in_cycles
           ~proto_parameters
       in
       let blocks_preservation_cycles =
         JSON.(proto_parameters |-> "blocks_preservation_cycles" |> as_int)
       in
       let additional_cycles = cycles - blocks_preservation_cycles in
       return (parameter_overrides, Node.Rolling (Some additional_cycles))
+  | Default_with_refutation_full ->
+      let cycles =
+        Dal.Parameters.full_storage_period_with_refutation_in_cycles
+          ~proto_parameters
+      in
+      let blocks_preservation_cycles =
+        JSON.(proto_parameters |-> "blocks_preservation_cycles" |> as_int)
+      in
+      let additional_cycles = cycles - blocks_preservation_cycles in
+      return (parameter_overrides, Node.Rolling (Some additional_cycles))
          'dal_attested_slots_validity_lag' by a factor of %d."
         factor ;
       let cycles =
-        Dal.Parameters.storage_period_with_refutation_in_cycles
+        Dal.Parameters.full_storage_period_with_refutation_in_cycles
           ~proto_parameters
       in
       Log.info
 let register ~protocols =
       ~tags:["restart"]
       ~activation_timestamp:Now
       ~producer_profiles:[0]
+      ~l1_history_mode:(Custom (Rolling (Some 5)))
       "restart DAL node (producer)"
       test_restart_dal_node
       protocols ;
-- GitLab
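The period helpers above share one rounding rule: a number of blocks is
converted into cycles, rounding up to a whole number of cycles. A standalone
OCaml sketch of that rule (illustrative only; the real helpers read
"blocks_per_cycle" and "attestation_lag" from [proto_parameters]):

  let cycles_of_blocks ~blocks_per_cycle blocks =
    (* Round up: a partial cycle still costs a full extra cycle. *)
    if blocks mod blocks_per_cycle = 0 then blocks / blocks_per_cycle
    else 1 + (blocks / blocks_per_cycle)

For instance, with blocks_per_cycle = 8 and attestation_lag = 8, the initial
storage period is (3 * 8) + 1 = 25 blocks, and
cycles_of_blocks ~blocks_per_cycle:8 25 = 4 cycles.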