diff --git a/src/bin_dal_node/daemon.ml b/src/bin_dal_node/daemon.ml
index a9bc34445677b23a5ab5cbbf402490ab03c16d75..eb609f3e9a8ec100ed932724194a707210eb4479 100644
--- a/src/bin_dal_node/daemon.ml
+++ b/src/bin_dal_node/daemon.ml
@@ -402,6 +402,7 @@ module Handler = struct

   let process_block ctxt cctxt proto_parameters finalized_shell_header =
     let open Lwt_result_syntax in
+    let store = Node_context.get_store ctxt in
     let block_level = finalized_shell_header.Block_header.level in
     let block = `Level block_level in
     let pred_level = Int32.pred block_level in
@@ -431,7 +432,7 @@ module Handler = struct
           ~number_of_slots:proto_parameters.Dal_plugin.number_of_slots
           ~block_level
           slot_headers
-          (Node_context.get_store ctxt)
+          store
       else return_unit
     in
     let* () =
@@ -451,7 +452,7 @@ module Handler = struct
             Slot_manager.publish_slot_data
               ~level_committee:(Node_context.fetch_committee ctxt)
               ~slot_size:proto_parameters.cryptobox_parameters.slot_size
-              (Node_context.get_store ctxt)
+              store
               (Node_context.get_gs_worker ctxt)
               proto_parameters
               commitment
@@ -466,7 +467,7 @@ module Handler = struct
           ~attestation_lag:proto_parameters.attestation_lag
           ~number_of_slots:proto_parameters.number_of_slots
           (Plugin.is_attested attested_slots)
-          (Node_context.get_store ctxt)
+          store
       in
       let*! () =
         remove_unattested_slots_and_shards
@@ -487,9 +488,7 @@ module Handler = struct
       Event.(emit layer1_node_final_block (block_level, block_round))
     in
     (* This should be done at the end of the function. *)
-    Last_processed_level.save_last_processed_level
-      (Node_context.get_last_processed_level_store ctxt)
-      ~level:block_level
+    Store.Last_processed_level.save store.last_processed_level block_level

   let rec try_process_block ~retries ctxt cctxt proto_parameters
       finalized_shell_header =
@@ -682,9 +681,49 @@ let check_history_mode config profile_ctxt proto_parameters =
       else return_unit
   | Rolling {blocks = `Auto} | Full -> return_unit

+(* We need more levels because [store_skip_list_cells level] needs the plugin
+   for [attestation_lag + 1] levels in the past wrt the target [level]. Note
+   that {!get_storage_period} refers to published levels (not attested levels).
+   The plus one comes from the technical details of {!store_skip_list_cells}. *)
+let skip_list_offset proto_parameters =
+  proto_parameters.Dal_plugin.attestation_lag + 1
+
+(* This function determines the storage period taking into account the node's
+   [first_seen_level]. Indeed, if the node is starting for the first time, we do
+   not expect it to store skip list cells (even if it supports refutations) for
+   the entire required period of 3 months, because it will not have the slot
+   pages for this period anyway. Note that this could be further refined by
+   taking into account when the corresponding rollup was originated. *)
+let get_storage_period profile_ctxt proto_parameters head_level first_seen_level
+    =
+  let supports_refutations =
+    Profile_manager.supports_refutations profile_ctxt
+  in
+  let default_storage_period =
+    Profile_manager.get_attested_data_default_store_period
+      profile_ctxt
+      proto_parameters
+  in
+  if supports_refutations then
+    (* This deliberately does not take into account the [last_processed_level]. *)
+    let online_period =
+      match first_seen_level with
+      | None -> 0
+      | Some first_seen_level ->
+          Int32.sub head_level first_seen_level |> Int32.to_int
+    in
+    (* Even if the node was not online previously, or it was online only for a
+       few levels, we still need to store data for the minimal period, defined
+       in [get_attested_data_default_store_period]. TODO: refactor to expose
+       this value. *)
+    let max_period = max (2 * proto_parameters.attestation_lag) online_period in
+    min max_period default_storage_period
+  else default_storage_period
+
 (* This function checks the L1 node stores enough block data for the DAL node
    to function correctly. *)
-let check_l1_history_mode profile_ctxt cctxt proto_parameters =
+let check_l1_history_mode profile_ctxt cctxt proto_parameters head_level
+    first_level =
   let open Lwt_result_syntax in
   let* l1_history_mode =
     let* l1_mode, blocks_preservation_cycles_opt =
@@ -724,21 +763,15 @@ let check_l1_history_mode profile_ctxt cctxt proto_parameters =
   in
   match l1_history_mode with
   | `L1_archive -> return_unit
-  | `L1_rolling c ->
-      let b =
-        Profile_manager.get_attested_data_default_store_period
-          profile_ctxt
-          proto_parameters
-      in
-      let b =
+  | `L1_rolling l1_cycles ->
+      let dal_blocks =
+        get_storage_period profile_ctxt proto_parameters head_level first_level
+        +
         if Profile_manager.supports_refutations profile_ctxt then
-          (* We need more levels because [store_skip_list_cells level] needs the
-             plugin for [attestation_lag + 1] levels in the past wrt to the
-             target [level]. *)
-          b + proto_parameters.attestation_lag + 1
-        else b
+          skip_list_offset proto_parameters
+        else 0
       in
-      check ~dal_blocks:b ~l1_cycles:c
+      check ~dal_blocks ~l1_cycles

 let build_profile_context config =
   let open Lwt_result_syntax in
@@ -747,7 +780,7 @@ let build_profile_context config =
   match res with
   | Ok loaded_profile ->
       (* The profiles from the loaded context are prioritized over the
-          profiles provided in the config file. *)
+         profiles provided in the config file. *)
       Profile_manager.merge_profiles
         ~lower_prio:config.Configuration_file.profile
         ~higher_prio:loaded_profile
@@ -772,7 +805,7 @@ let update_and_register_profiles ctxt =
 (* This function fetches the protocol plugins for levels for which it is needed
    to add skip list cells. It starts by computing the oldest level at which it
    will be needed to add skip list cells. *)
-let get_proto_plugins cctxt profile_ctxt last_processed_level
+let get_proto_plugins cctxt profile_ctxt ~last_processed_level ~first_seen_level
     (head_level, (module Plugin : Dal_plugin.T), proto_parameters) =
   (* We resolve the plugins for all levels starting with [(max
      last_processed_level (head_level - storage_period)], or (max
@@ -784,9 +817,7 @@ let get_proto_plugins cctxt profile_ctxt last_processed_level
      parameters and the relevant encodings do not change for now, so the head
      plugin could be used). *)
   let storage_period =
-    Profile_manager.get_attested_data_default_store_period
-      profile_ctxt
-      proto_parameters
+    get_storage_period profile_ctxt proto_parameters head_level first_seen_level
   in
   let first_level =
     Int32.max
@@ -795,8 +826,7 @@ let get_proto_plugins cctxt profile_ctxt last_processed_level
   in
   let first_level =
     if Profile_manager.supports_refutations profile_ctxt then
-      (* See usage of the plugin in [store_skip_list_cells] *)
-      Int32.(sub first_level (of_int (1 + proto_parameters.attestation_lag)))
+      Int32.sub first_level (Int32.of_int (skip_list_offset proto_parameters))
     else first_level
   in
   let first_level = Int32.(max 1l first_level) in
@@ -815,7 +845,7 @@ let get_proto_plugins cctxt profile_ctxt last_processed_level
    and we don't detect it if this code starts running just before the migration
    level, and the head changes meanwhile to be above the migration level. *)
-let clean_up_store ctxt cctxt ~last_processed_level
+let clean_up_store ctxt cctxt ~last_processed_level ~first_seen_level
     (head_level, (module Plugin : Dal_plugin.T), proto_parameters) =
   let open Lwt_result_syntax in
   let store_skip_list_cells ~level =
@@ -834,34 +864,32 @@ let clean_up_store ctxt cctxt ~last_processed_level
         level
         (module Plugin : Dal_plugin.T with type block_info = Plugin.block_info)
   in
-  let lpl_store = Node_context.get_last_processed_level_store ctxt in
+  let store = Node_context.get_store ctxt in
   let supports_refutations = Handler.supports_refutations ctxt in
   (* [target_level] identifies the level wrt to head level at which we want to
      start the P2P and process blocks as usual. *)
   let target_level head_level = Int32.(sub head_level 2l) in
-  let first_level_for_skip_list_storage period head_level =
-    (* We consider that [period] refers to published levels (not attested
-       levels). The plus one comes from the technical details of
-       {store_skip_list_cells}. Note that behind this first level we do not have
+  let first_level_for_skip_list_storage period level =
+    (* Note that behind this first level we do not have
       the plugin. *)
-    Int32.(
-      sub
-        head_level
-        (of_int (period - (proto_parameters.Dal_plugin.attestation_lag + 1))))
+    Int32.(sub level (of_int period))
   in
   let should_store_skip_list_cells ~head_level ~level =
     let profile_ctxt = Node_context.get_profile_ctxt ctxt in
     let period =
-      Profile_manager.get_attested_data_default_store_period
+      get_storage_period
         profile_ctxt
         proto_parameters
+        head_level
+        first_seen_level
+      + skip_list_offset proto_parameters
     in
     supports_refutations
     && level >= first_level_for_skip_list_storage period head_level
   in
   let rec do_clean_up last_processed_level head_level =
     let last_level = target_level head_level in
-    let rec clean_up_from_level level =
+    let rec clean_up_at_level level =
       if level > last_level then return_unit
       else
         let*! () =
@@ -873,12 +901,12 @@ let clean_up_store ctxt cctxt ~last_processed_level
           else return_unit
         in
         let* () =
-          Last_processed_level.save_last_processed_level lpl_store ~level
+          Store.Last_processed_level.save store.last_processed_level level
         in
-        clean_up_from_level (Int32.succ level)
+        clean_up_at_level (Int32.succ level)
     in
     (* Clean up from [last_processed_level] to [last_level]. *)
-    let* () = clean_up_from_level (Int32.succ last_processed_level) in
+    let* () = clean_up_at_level (Int32.succ last_processed_level) in
     (* As this iteration may be slow, the head level might have advanced in the
        meanwhile. *)
     let* header =
@@ -1001,16 +1029,21 @@ let run ~data_dir ~configuration_override =
   in
   let* store = Store.init config in
   let*! metrics_server = Metrics.launch config.metrics_addr in
-  let* last_processed_level_store =
-    Last_processed_level.init ~root_dir:(Configuration_file.store_path config)
-  in
+  let* first_seen_level = Store.First_seen_level.load store.first_seen_level in
   let* last_processed_level =
-    Last_processed_level.load_last_processed_level last_processed_level_store
+    Store.Last_processed_level.load store.last_processed_level
   in
   (* First wait for the L1 node to be bootstrapped. *)
   let* () = wait_for_l1_bootstrapped cctxt in
   (* Check the DAL node's and L1 node's history mode. *)
-  let* ((_, _, proto_parameters) as plugin_info) = get_head_plugin cctxt in
+  let* ((head_level, _, proto_parameters) as plugin_info) =
+    get_head_plugin cctxt
+  in
+  let* () =
+    match first_seen_level with
+    | None -> Store.First_seen_level.save store.first_seen_level head_level
+    | Some _ -> return_unit
+  in
   let* profile_ctxt = build_profile_context config in
   let*? () =
     Profile_manager.validate_slot_indexes
@@ -1018,7 +1051,14 @@ let run ~data_dir ~configuration_override =
       ~number_of_slots:proto_parameters.number_of_slots
   in
   let* () = check_history_mode config profile_ctxt proto_parameters in
-  let* () = check_l1_history_mode profile_ctxt cctxt proto_parameters in
+  let* () =
+    check_l1_history_mode
+      profile_ctxt
+      cctxt
+      proto_parameters
+      head_level
+      first_seen_level
+  in
   (* Initialize the crypto process *)
   (* FIXME: https://gitlab.com/tezos/tezos/-/issues/5743
      Instead of recomputing these parameters, they could be stored
@@ -1040,7 +1080,12 @@ let run ~data_dir ~configuration_override =
     (Cryptobox.Internal_for_tests.encoded_share_size cryptobox) ;
   Value_size_hooks.set_number_of_slots proto_parameters.number_of_slots ;
   let* proto_plugins =
-    get_proto_plugins cctxt profile_ctxt last_processed_level plugin_info
+    get_proto_plugins
+      cctxt
+      profile_ctxt
+      ~last_processed_level
+      ~first_seen_level
+      plugin_info
   in
   let ctxt =
     Node_context.init
@@ -1055,19 +1100,23 @@ let run ~data_dir ~configuration_override =
       transport_layer
       cctxt
       metrics_server
-      last_processed_level_store
   in
   let* () =
     match last_processed_level with
    | None -> (* there's nothing to clean up *) return_unit
     | Some last_processed_level ->
-        clean_up_store ctxt cctxt ~last_processed_level plugin_info
+        clean_up_store
+          ctxt
+          cctxt
+          ~last_processed_level
+          ~first_seen_level
+          plugin_info
   in
   let* crawler =
     (* We reload the last processed level because [clean_up_store] has likely
        modified it. *)
     let* last_notified_level =
-      Last_processed_level.load_last_processed_level last_processed_level_store
+      Store.Last_processed_level.load store.last_processed_level
    in
    let open Constants in
    let*! crawler =
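
Note on the daemon.ml changes above: the clamping performed by the new [get_storage_period] can be summarised with a small, self-contained model. This is an illustrative sketch, not code from the patch: plain ints replace the node's [tzresult Lwt] types, [default_period] stands in for [Profile_manager.get_attested_data_default_store_period], and the numeric parameters are assumptions.

```ocaml
(* Illustrative model of the clamping in [get_storage_period]. *)
let storage_period ~supports_refutations ~default_period ~attestation_lag
    ~head_level ~first_seen_level =
  if supports_refutations then
    let online_period =
      match first_seen_level with
      | None -> 0
      | Some first -> Int32.to_int (Int32.sub head_level first)
    in
    (* At least the minimal window, at most the full default period. *)
    min (max (2 * attestation_lag) online_period) default_period
  else default_period

let () =
  (* Freshly started node: only the minimal window [2 * attestation_lag]. *)
  assert (
    storage_period ~supports_refutations:true ~default_period:10_000
      ~attestation_lag:8 ~head_level:500l ~first_seen_level:None
    = 16) ;
  (* Node online for 100 levels: covers exactly that online period. *)
  assert (
    storage_period ~supports_refutations:true ~default_period:10_000
      ~attestation_lag:8 ~head_level:500l ~first_seen_level:(Some 400l)
    = 100) ;
  (* Long-running node: capped at the default period. *)
  assert (
    storage_period ~supports_refutations:true ~default_period:10_000
      ~attestation_lag:8 ~head_level:60_000l ~first_seen_level:(Some 1l)
    = 10_000)
```
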
diff --git a/src/bin_dal_node/last_processed_level.ml b/src/bin_dal_node/last_processed_level.ml
deleted file mode 100644
index fea3c30f538b466ad2d5410b3744105c0baec668..0000000000000000000000000000000000000000
--- a/src/bin_dal_node/last_processed_level.ml
+++ /dev/null
@@ -1,36 +0,0 @@
-(*****************************************************************************)
-(*                                                                           *)
-(* SPDX-License-Identifier: MIT                                              *)
-(* SPDX-FileCopyrightText: 2024 Functori, <contact@functori.com>             *)
-(* SPDX-FileCopyrightText: 2024 Nomadic Labs, <contact@nomadic-labs.com>     *)
-(*                                                                           *)
-(*****************************************************************************)
-
-let last_processed_level_filename = "last_processed_level"
-
-module KVS = Key_value_store
-
-type nonrec t = (unit, unit, int32) KVS.t
-
-let init ~root_dir : t tzresult Lwt.t = KVS.init ~lru_size:1 ~root_dir
-
-let file_layout ~root_dir () =
-  let filepath = Filename.concat root_dir last_processed_level_filename in
-  Key_value_store.layout
-    ~encoding:Data_encoding.int32
-    ~filepath
-    ~eq:Stdlib.( = )
-    ~index_of:(fun () -> 0)
-    ~number_of_keys_per_file:1
-    ()
-
-let load_last_processed_level (t : t) =
-  let open Lwt_result_syntax in
-  let* exists = KVS.value_exists t file_layout () () in
-  if exists then
-    let* res = KVS.read_value t file_layout () () in
-    return_some res
-  else return_none
-
-let save_last_processed_level (t : t) ~level =
-  KVS.write_value ~override:true t file_layout () () level
diff --git a/src/bin_dal_node/last_processed_level.mli b/src/bin_dal_node/last_processed_level.mli
deleted file mode 100644
index 6efc7a6b8ecdeb3458e83197e4f9d4cd6c8af5a1..0000000000000000000000000000000000000000
--- a/src/bin_dal_node/last_processed_level.mli
+++ /dev/null
@@ -1,30 +0,0 @@
-(*****************************************************************************)
-(*                                                                           *)
-(* SPDX-License-Identifier: MIT                                              *)
-(* SPDX-FileCopyrightText: 2024 Functori, <contact@functori.com>             *)
-(* SPDX-FileCopyrightText: 2024 Nomadic Labs, <contact@nomadic-labs.com>     *)
-(*                                                                           *)
-(*****************************************************************************)
-
-(** Helper functions to load/store the level of the last processed L1 block by
-    the crawler from/to disk. *)
-
-type t
-
-(** Initializes a KVS store at the given location to remember the last processed
-    level by the crawler on disk. *)
-val init : root_dir:string -> t tzresult Lwt.t
-
-(** [load_last_processed_level t] loads and returns the content of
-    [data_dir/last_processed_level.json] as an int32. If the file doesn't
-    exists, the result is [None]. The function returns
-    [Failed_to_load_last_processed_level] in the error monad in case of
-    failure. *)
-val load_last_processed_level : t -> int32 option tzresult Lwt.t
-
-(** [save_last_processed_level t ~level] saves the value
-    of [last_processed_level] into [data_dir/last_processed_level.json],
-    overriding any previous value. The function returns
-    [Failed_to_save_last_processed_level] in the error monad in case of
-    failure. *)
-val save_last_processed_level : t -> level:int32 -> unit tzresult Lwt.t
diff --git a/src/bin_dal_node/node_context.ml b/src/bin_dal_node/node_context.ml
index 2550f528061b6dc4a02b8e62072f3a9c39ba460d..624783f5fd50bbd11237c3317c1d0ed917dc5293 100644
--- a/src/bin_dal_node/node_context.ml
+++ b/src/bin_dal_node/node_context.ml
@@ -41,12 +41,11 @@ type t = {
   transport_layer : Gossipsub.Transport_layer.t;
   mutable profile_ctxt : Profile_manager.t;
   metrics_server : Metrics.t;
-  last_processed_level_store : Last_processed_level.t;
 }
 
 let init config profile_ctxt cryptobox shards_proofs_precomputation
     proto_parameters proto_plugins store gs_worker transport_layer cctxt
-    metrics_server last_processed_level_store =
+    metrics_server =
   let neighbors_cctxts =
     List.map
       (fun Configuration_file.{addr; port} ->
@@ -73,7 +72,6 @@ let init config profile_ctxt cryptobox shards_proofs_precomputation
     transport_layer;
     profile_ctxt;
     metrics_server;
-    last_processed_level_store;
   }
 
 let may_reconstruct ~reconstruct slot_id t =
@@ -161,8 +159,6 @@ let get_proto_parameters ctxt = ctxt.proto_parameters
 
 let get_shards_proofs_precomputation ctxt = ctxt.shards_proofs_precomputation
 
-let get_last_processed_level_store ctxt = ctxt.last_processed_level_store
-
 let get_store ctxt = ctxt.store
 
 let get_gs_worker ctxt = ctxt.gs_worker
diff --git a/src/bin_dal_node/node_context.mli b/src/bin_dal_node/node_context.mli
index b9fef80093973a073d9492260fda2df5da6f7f1a..a0ebc7ebc6cab19fdc549545de4ed99f3587319f 100644
--- a/src/bin_dal_node/node_context.mli
+++ b/src/bin_dal_node/node_context.mli
@@ -41,7 +41,6 @@ val init :
   Gossipsub.Transport_layer.t ->
   Tezos_rpc.Context.generic ->
   Metrics.t ->
-  Last_processed_level.t ->
   t
 
 (** Returns all the registered plugins *)
@@ -107,10 +106,6 @@ val get_shards_proofs_precomputation :
 (** [get_store ctxt] returns the dal node store. *)
 val get_store : t -> Store.t
 
-(** [get_last_processed_level_store ctxt] returns the last processed level
-    store. *)
-val get_last_processed_level_store : t -> Last_processed_level.t
-
 (** [get_gs_worker ctxt] returns the Gossipsub worker state. *)
 val get_gs_worker : t -> Gossipsub.Worker.t
diff --git a/src/bin_dal_node/single_value_store.ml b/src/bin_dal_node/single_value_store.ml
new file mode 100644
index 0000000000000000000000000000000000000000..a64122148e3a3f4c79eecfb88be9999f45b6130d
--- /dev/null
+++ b/src/bin_dal_node/single_value_store.ml
@@ -0,0 +1,57 @@
+(*****************************************************************************)
+(*                                                                           *)
+(* SPDX-License-Identifier: MIT                                              *)
+(* SPDX-FileCopyrightText: 2024 Functori, <contact@functori.com>             *)
+(* SPDX-FileCopyrightText: 2024 Nomadic Labs, <contact@nomadic-labs.com>     *)
+(*                                                                           *)
+(*****************************************************************************)
+
+module type VALUE = sig
+  type t
+
+  val name : string
+
+  val encoding : t Data_encoding.t
+end
+
+module type S = sig
+  type value
+
+  type t
+
+  val init : root_dir:string -> t tzresult Lwt.t
+
+  val load : t -> value option tzresult Lwt.t
+
+  val save : t -> value -> unit tzresult Lwt.t
+end
+
+module Make (Value : VALUE) : S with type value = Value.t = struct
+  module KVS = Key_value_store
+
+  type value = Value.t
+
+  type nonrec t = (unit, unit, Value.t) KVS.t
+
+  let init ~root_dir : t tzresult Lwt.t = KVS.init ~lru_size:1 ~root_dir
+
+  let file_layout ~root_dir () =
+    let filepath = Filename.concat root_dir Value.name in
+    Key_value_store.layout
+      ~encoding:Value.encoding
+      ~filepath
+      ~eq:Stdlib.( = )
+      ~index_of:(fun () -> 0)
+      ~number_of_keys_per_file:1
+      ()
+
+  let load t =
+    let open Lwt_result_syntax in
+    let* exists = KVS.value_exists t file_layout () () in
+    if exists then
+      let* res = KVS.read_value t file_layout () () in
+      return_some res
+    else return_none
+
+  let save t value = KVS.write_value ~override:true t file_layout () () value
+end
diff --git a/src/bin_dal_node/single_value_store.mli b/src/bin_dal_node/single_value_store.mli
new file mode 100644
index 0000000000000000000000000000000000000000..c78b8c0e5b072f2f32aaaaeb1ef8d29321212533
--- /dev/null
+++ b/src/bin_dal_node/single_value_store.mli
@@ -0,0 +1,36 @@
+(*****************************************************************************)
+(*                                                                           *)
+(* SPDX-License-Identifier: MIT                                              *)
+(* SPDX-FileCopyrightText: 2024 Functori, <contact@functori.com>             *)
+(* SPDX-FileCopyrightText: 2024 Nomadic Labs, <contact@nomadic-labs.com>     *)
+(*                                                                           *)
+(*****************************************************************************)
+
+(** Helper module to load/store a single value from/to disk. *)
+
+module type VALUE = sig
+  type t
+
+  val name : string
+
+  val encoding : t Data_encoding.t
+end
+
+module type S = sig
+  type value
+
+  type t
+
+  (** Initializes a KVS store at the given location to remember some value. *)
+  val init : root_dir:string -> t tzresult Lwt.t
+
+  (** [load t] loads the value from store [t]. If the value is not found, the
+      result is [None]. *)
+  val load : t -> value option tzresult Lwt.t
+
+  (** [save t value] saves the [value] into the store [t], overriding
+      any previous value. *)
+  val save : t -> value -> unit tzresult Lwt.t
+end
+
+module Make : functor (Value : VALUE) -> S with type value = Value.t
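
Usage note for the new module above: a sketch of how [Single_value_store.Make] is meant to be instantiated and used. [Chain_name] and [demo] are hypothetical; the real instantiations, [Last_processed_level] and [First_seen_level], appear in store.ml below. The sketch assumes the DAL node's usual environment ([Key_value_store], [tzresult], [Lwt_result_syntax]) is in scope.

```ocaml
(* Hypothetical instantiation of [Single_value_store.Make]. *)
module Chain_name = Single_value_store.Make (struct
  type t = string

  let name = "chain_name"

  let encoding = Data_encoding.string
end)

(* Save a value, then read it back; [load] returns [None] until the first
   [save] has been performed. *)
let demo ~root_dir =
  let open Lwt_result_syntax in
  let* store = Chain_name.init ~root_dir in
  let* () = Chain_name.save store "mainnet" in
  Chain_name.load store
```
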
diff --git a/src/bin_dal_node/store.ml b/src/bin_dal_node/store.ml
index a287111912d0253e6e400ca8802f75429e9d7611..bacb4e55f23afb502fa72db86be980af88df2f14 100644
--- a/src/bin_dal_node/store.ml
+++ b/src/bin_dal_node/store.ml
@@ -404,6 +404,22 @@ module Commitment_indexed_cache =
       let hash = Hashtbl.hash
     end)
 
+module Last_processed_level = Single_value_store.Make (struct
+  type t = int32
+
+  let name = "last_processed_level"
+
+  let encoding = Data_encoding.int32
+end)
+
+module First_seen_level = Single_value_store.Make (struct
+  type t = int32
+
+  let name = "first_seen_level"
+
+  let encoding = Data_encoding.int32
+end)
+
 (** Store context *)
 type t = {
   slot_header_statuses : Statuses.t;
@@ -415,6 +431,8 @@ type t = {
     Commitment_indexed_cache.t;
       (* The length of the array is the number of shards per slot *)
   finalized_commitments : Slot_id_cache.t;
+  last_processed_level : Last_processed_level.t;
+  first_seen_level : First_seen_level.t;
 }
 
 let cache_entry node_store commitment slot shares shard_proofs =
@@ -566,6 +584,8 @@ let init config =
   let* shards = Shards.init base_dir Stores_dirs.shard in
   let* slots = Slots.init base_dir Stores_dirs.slot in
   let* skip_list_cells = init_skip_list_cells_store base_dir in
+  let* last_processed_level = Last_processed_level.init ~root_dir:base_dir in
+  let* first_seen_level = First_seen_level.init ~root_dir:base_dir in
   let*! () = Event.(emit store_is_ready ()) in
   return
     {
@@ -576,6 +596,8 @@ let init config =
       cache = Commitment_indexed_cache.create Constants.cache_size;
       finalized_commitments =
         Slot_id_cache.create ~capacity:Constants.slot_id_cache_size;
+      last_processed_level;
+      first_seen_level;
     }
 
 let add_slot_headers ~number_of_slots ~block_level slot_headers t =
diff --git a/src/bin_dal_node/store.mli b/src/bin_dal_node/store.mli
index f61e93b9dfec856faa61476b3747895cfb2f3fa7..e1c1f8bd9469cb82dd7faa995aa8485fd2862bd2 100644
--- a/src/bin_dal_node/store.mli
+++ b/src/bin_dal_node/store.mli
@@ -127,6 +127,10 @@ module Commitment_indexed_cache : sig
   val find_opt : 'a t -> commitment -> 'a option
 end
 
+module Last_processed_level : Single_value_store.S with type value = int32
+
+module First_seen_level : Single_value_store.S with type value = int32
+
 type t = private {
   slot_header_statuses : Statuses.t;  (** Statuses store *)
   shards : Shards.t;  (** Shards store *)
@@ -140,6 +144,9 @@ type t = private {
       (** Cache of commitments indexed by level and then by slot id. The
          maximum number of levels is given by {!Constants.slot_id_cache_size}.
          No more than [number_of_slots] commitments can be stored per level. *)
+  last_processed_level : Last_processed_level.t;
+      (** Last processed level store *)
+  first_seen_level : First_seen_level.t;  (** First seen level store *)
 }
 
 (** [cache_entry store commitment entry] adds or replace an entry to
diff --git a/tezt/lib_tezos/dal_common.ml b/tezt/lib_tezos/dal_common.ml
index 62e224e2fb9809692c994d2e923ac30788a1e7b8..046f3431602cda4149b3f72eceb2bc9afb6cf394 100644
--- a/tezt/lib_tezos/dal_common.ml
+++ b/tezt/lib_tezos/dal_common.ml
@@ -81,7 +81,7 @@ module Parameters = struct
     in
     from_protocol_parameters json |> return
 
-  let storage_period_with_refutation_in_cycles ~proto_parameters =
+  let full_storage_period_with_refutation_in_cycles ~proto_parameters =
     let blocks_per_cycle =
       JSON.(proto_parameters |-> "blocks_per_cycle" |> as_int)
     in
@@ -110,6 +110,18 @@ module Parameters = struct
     if blocks mod blocks_per_cycle = 0 then blocks / blocks_per_cycle
     else 1 + (blocks / blocks_per_cycle)
 
+  let initial_storage_period_with_refutation_in_cycles ~proto_parameters =
+    let blocks_per_cycle =
+      JSON.(proto_parameters |-> "blocks_per_cycle" |> as_int)
+    in
+    let attestation_lag =
+      JSON.(
+        proto_parameters |-> "dal_parametric" |-> "attestation_lag" |> as_int)
+    in
+    let blocks = (3 * attestation_lag) + 1 in
+    if blocks mod blocks_per_cycle = 0 then blocks / blocks_per_cycle
+    else 1 + (blocks / blocks_per_cycle)
+
   let storage_period_without_refutation_in_cycles ~proto_parameters =
     let blocks_per_cycle =
       JSON.(proto_parameters |-> "blocks_per_cycle" |> as_int)
diff --git a/tezt/lib_tezos/dal_common.mli b/tezt/lib_tezos/dal_common.mli
index 1bd34012b0b96783e084e73cb3bd8b90749ae580..f350938d3ac3ac3388070f86391b0e8f3813bb0b 100644
--- a/tezt/lib_tezos/dal_common.mli
+++ b/tezt/lib_tezos/dal_common.mli
@@ -44,10 +44,20 @@ module Parameters : sig
   val from_client : Client.t -> t Lwt.t
 
   (* This function computes the period (in cycles) during which the node stores
-     data about attested slots assuming the node supports refutations. See the
-     functions [Profile_manager.get_attested_data_default_store_period] and
-     [Daemon.get_proto_plugins] in src/bin_dal_node/. *)
-  val storage_period_with_refutation_in_cycles : proto_parameters:JSON.t -> int
+     data about attested slots assuming the node supports refutations and it has
+     been started sufficiently far in the past. See the functions
+     [Profile_manager.get_attested_data_default_store_period] and
+     [Daemon.get_storage_period] in src/bin_dal_node/. *)
+  val full_storage_period_with_refutation_in_cycles :
+    proto_parameters:JSON.t -> int
+
+  (* This function computes the period (in cycles) during which the node stores
+     data about attested slots assuming the node supports refutations and it
+     has just been started. See the functions
+     [Profile_manager.get_attested_data_default_store_period] and
+     [Daemon.get_storage_period] in src/bin_dal_node/. *)
+  val initial_storage_period_with_refutation_in_cycles :
+    proto_parameters:JSON.t -> int
 
   (* This function computes the period (in cycles) during which the node stores
      data about attested slots assuming the node does not supports
diff --git a/tezt/tests/dal.ml b/tezt/tests/dal.ml
index 33bdb44d48397ae97ea8cdfb7e11b863eb61e894..89bbc8774fc18a881e6d82d063e16abf9ab4e6a7 100644
--- a/tezt/tests/dal.ml
+++ b/tezt/tests/dal.ml
@@ -262,6 +262,10 @@ let slot_size_param slot_size =
 (* Some initialization functions to start needed nodes. *)
 type l1_history_mode =
   | Default_with_refutation
+  (* to be used when the node starts for the first time *)
+  | Default_with_refutation_full
+  (* to be used when the node restarts, and it was first started more than the
+     default storage period ago *)
   | Default_without_refutation
   | Custom of Node.history_mode
 
@@ -316,7 +320,17 @@ let history_mode base protocol parameter_overrides proto_parameters
       return (parameter_overrides, Node.Rolling (Some additional_cycles))
   | Default_with_refutation ->
       let cycles =
-        Dal.Parameters.storage_period_with_refutation_in_cycles
+        Dal.Parameters.initial_storage_period_with_refutation_in_cycles
           ~proto_parameters
       in
+      let blocks_preservation_cycles =
+        JSON.(proto_parameters |-> "blocks_preservation_cycles" |> as_int)
+      in
+      let additional_cycles = cycles - blocks_preservation_cycles in
+      return (parameter_overrides, Node.Rolling (Some additional_cycles))
+  | Default_with_refutation_full ->
+      let cycles =
+        Dal.Parameters.full_storage_period_with_refutation_in_cycles
+          ~proto_parameters
+      in
       let blocks_preservation_cycles =
         JSON.(proto_parameters |-> "blocks_preservation_cycles" |> as_int)
       in
@@ -343,7 +357,7 @@ let history_mode base protocol parameter_overrides proto_parameters
         'dal_attested_slots_validity_lag' by a factor of %d."
        factor ;
      let cycles =
-        Dal.Parameters.storage_period_with_refutation_in_cycles
+        Dal.Parameters.full_storage_period_with_refutation_in_cycles
          ~proto_parameters
      in
      Log.info
@@ -8054,6 +8068,7 @@ let register ~protocols =
     ~tags:["restart"]
     ~activation_timestamp:Now
     ~producer_profiles:[0]
+    ~l1_history_mode:(Custom (Rolling (Some 5)))
     "restart DAL node (producer)"
     test_restart_dal_node
     protocols ;
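
Worked example for the tezt helpers above: both cycle computations convert a number of blocks to cycles with the same ceiling division; they differ only in how many blocks must be covered. The parameter values below (attestation_lag = 8, blocks_per_cycle = 16) are assumptions for illustration, not protocol constants.

```ocaml
(* Ceiling division from blocks to cycles, as in
   [initial_storage_period_with_refutation_in_cycles]. *)
let cycles_needed ~blocks ~blocks_per_cycle =
  if blocks mod blocks_per_cycle = 0 then blocks / blocks_per_cycle
  else 1 + (blocks / blocks_per_cycle)

let () =
  (* Initial period: (3 * attestation_lag) + 1 = 25 blocks, hence 2 cycles. *)
  assert (cycles_needed ~blocks:25 ~blocks_per_cycle:16 = 2) ;
  (* An exact multiple needs no extra cycle: 32 blocks, still 2 cycles. *)
  assert (cycles_needed ~blocks:32 ~blocks_per_cycle:16 = 2)
```
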