From 6f78b4baad1e842e949719d4c1a348ae2a532693 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Fri, 24 Sep 2021 15:57:25 +0200 Subject: [PATCH 01/13] Store: minor cosmetic changes --- src/lib_store/block_store.ml | 130 ++++++++++++++++------------------ src/lib_store/store_events.ml | 2 +- 2 files changed, 63 insertions(+), 69 deletions(-) diff --git a/src/lib_store/block_store.ml b/src/lib_store/block_store.ml index f878f0397502..66c9c3f5d0cd 100644 --- a/src/lib_store/block_store.ml +++ b/src/lib_store/block_store.ml @@ -582,66 +582,61 @@ let compute_new_savepoint block_store history_mode ~min_level_to_preserve block_store ~head:new_head min_level_to_preserve - >>=? function - | min_block_to_preserve -> ( - let ((_min_block_hash, min_block_level) as min_block_descr) = - Block_repr.descriptor min_block_to_preserve - in - (* New savepoint = min min_level_to_preserve (min new lowest cemented block) *) - let cemented_cycles = - match - Cemented_block_store.cemented_blocks_files - block_store.cemented_store - with - | None -> cycles_to_cement - | Some table -> - (Array.to_list table - |> List.map - (fun {Cemented_block_store.start_level; end_level; _} -> - (start_level, end_level))) - @ cycles_to_cement + >>=? fun min_block_to_preserve -> + let ((_min_block_hash, min_block_level) as min_block_descr) = + Block_repr.descriptor min_block_to_preserve + in + (* New savepoint = min min_level_to_preserve (min new lowest cemented block) *) + let cemented_cycles = + match + Cemented_block_store.cemented_blocks_files block_store.cemented_store + with + | None -> cycles_to_cement + | Some table -> + (Array.to_list table + |> List.map (fun {Cemented_block_store.start_level; end_level; _} -> + (start_level, end_level))) + @ cycles_to_cement + in + if Compare.Int32.(snd savepoint >= min_block_level) then return savepoint + else + let cemented_cycles_len = List.length cemented_cycles in + (* If the offset is 0, the minimum block to preserve will be + the savepoint. 
*) + if offset = 0 then return min_block_descr + else if + (* If the number of cemented cycles is not yet the offset, + then the savepoint will be unchanged. *) + cemented_cycles_len < offset + then return savepoint + else + (* Else we shift the savepoint by [nb_cycles_to_cement] + cycles *) + let shifted_savepoint_level = + (* new lowest cemented block *) + fst + (List.nth cemented_cycles (cemented_cycles_len - offset) + |> WithExceptions.Option.get ~loc:__LOC__) in - if Compare.Int32.(snd savepoint >= min_block_level) then + (* If the savepoint is still higher than the shifted + savepoint, preserve the savepoint *) + if Compare.Int32.(snd savepoint >= shifted_savepoint_level) then return savepoint + else if + (* If the new savepoint is still higher than the min block + to preserve, we choose the min block to preserve. *) + Compare.Int32.(shifted_savepoint_level >= min_block_level) + then return min_block_descr else - let cemented_cycles_len = List.length cemented_cycles in - (* If the offset is 0, the minimum block to preserve will be - the savepoint. *) - if offset = 0 then return min_block_descr - else if - (* If the number of cemented cycles is not yet the offset, - then the savepoint will be unchanged. *) - cemented_cycles_len < offset - then return savepoint - else - (* Else we shift the savepoint by [nb_cycles_to_cement] - cycles *) - let shifted_savepoint_level = - (* new lowest cemented block *) - fst - (List.nth cemented_cycles (cemented_cycles_len - offset) - |> WithExceptions.Option.get ~loc:__LOC__) - in - (* If the savepoint is still higher than the shifted - savepoint, preserve the savepoint *) - if Compare.Int32.(snd savepoint >= shifted_savepoint_level) then - return savepoint - else if - (* If the new savepoint is still higher than the min block - to preserve, we choose the min block to preserve. 
*) - Compare.Int32.(shifted_savepoint_level >= min_block_level) - then return min_block_descr - else - (* Else the new savepoint is the one-cycle shifted - savepoint. *) - read_predecessor_block_by_level_opt - block_store - ~head:new_head - shifted_savepoint_level - >>=? function - | None -> - fail (Cannot_retrieve_savepoint shifted_savepoint_level) - | Some savepoint -> return (Block_repr.descriptor savepoint))) + (* Else the new savepoint is the one-cycle shifted + savepoint. *) + read_predecessor_block_by_level_opt + block_store + ~head:new_head + shifted_savepoint_level + >>=? function + | None -> fail (Cannot_retrieve_savepoint shifted_savepoint_level) + | Some savepoint -> return (Block_repr.descriptor savepoint)) let compute_new_caboose block_store history_mode ~new_savepoint ~min_level_to_preserve ~new_head = @@ -683,7 +678,7 @@ module BlocksLAFL = Set.Make (Int32) updates the [new_store] by storing the predecessors of the [new_head_lafl] and preserving the [lowest_bound_to_preserve_in_floating]. It returns the cycles to - cemented from [new_head] to [cementing_highwatermark] and the + cement from [new_head] to [cementing_highwatermark] and the savepoint and caboose candidates. *) let update_floating_stores block_store ~history_mode ~ro_store ~rw_store ~new_store ~new_head ~new_head_lafl ~lowest_bound_to_preserve_in_floating @@ -878,9 +873,10 @@ let check_store_consistency block_store ~cementing_highwatermark = {highest_cemented_level; cementing_highwatermark})) (* We want to keep in the floating store, at least, the blocks above - (new_head.lafl - (new_head.lafl).max_op_ttl)) Important: we might + (new_head.lafl - (new_head.lafl).max_op_ttl)). Important: we might not have this block so it should be treated as a potential lower - bound. *) + bound. 
Furethermore, we consider the current caboose as a potential + lower bound.*) let compute_lowest_bound_to_preserve_in_floating block_store ~new_head ~new_head_metadata = (* Safety check: is the highwatermark consistent with our highest cemented block *) @@ -957,7 +953,7 @@ let create_merging_thread block_store ~history_mode ~old_ro_store ~old_rw_store ~rw_store:old_rw_store ~head:new_head in - match history_mode with + (match history_mode with | History_mode.Archive -> List.iter_es (fun cycle_range -> @@ -965,7 +961,6 @@ let create_merging_thread block_store ~history_mode ~old_ro_store ~old_rw_store (* In archive, we store the metadatas *) cement_blocks ~write_metadata:true block_store cycle) cycles_interval_to_cement - >>=? fun () -> return (new_savepoint, new_caboose) | Rolling offset -> let offset = (Option.value @@ -990,9 +985,9 @@ let create_merging_thread block_store ~history_mode ~old_ro_store ~old_rw_store Cemented_block_store.trigger_gc block_store.cemented_store history_mode - >>= fun () -> return (new_savepoint, new_caboose) + >>= fun () -> return_unit else (* Don't cement any cycles! *) - return (new_savepoint, new_caboose) + return_unit | Full offset -> let offset = (Option.value @@ -1013,15 +1008,15 @@ let create_merging_thread block_store ~history_mode ~old_ro_store ~old_rw_store Cemented_block_store.trigger_gc block_store.cemented_store history_mode - >>= fun () -> return (new_savepoint, new_caboose) + >>= fun () -> return_unit else List.iter_es (fun cycle_range -> cycle_reader cycle_range >>=? fun cycle -> (* In full 0, we do not store the metadata *) cement_blocks ~write_metadata:false block_store cycle) - cycles_interval_to_cement - >>=? fun () -> return (new_savepoint, new_caboose)) + cycles_interval_to_cement) + >>=? fun () -> return (new_savepoint, new_caboose)) (fun exn -> Floating_block_store.close new_ro_store >>= fun () -> Lwt.fail exn) >>=? 
fun (new_savepoint, new_caboose) -> @@ -1049,7 +1044,6 @@ let merge_stores block_store ~(on_error : tztrace -> unit tzresult Lwt.t) let new_head_lafl = Block_repr.last_allowed_fork_level new_head_metadata in - Store_events.(emit start_merging_stores) new_head_lafl >>= fun () -> check_store_consistency block_store ~cementing_highwatermark >>=? fun () -> diff --git a/src/lib_store/store_events.ml b/src/lib_store/store_events.ml index dd7f801c44a8..99fb7961f9d3 100644 --- a/src/lib_store/store_events.ml +++ b/src/lib_store/store_events.ml @@ -270,7 +270,7 @@ let update_protocol_table = (* Warning *) let warning_incomplete_storage = - Internal_event.Simple.declare_1 + declare_1 ~level:Internal_event.Warning ~section ~name:"incomplete_storage" -- GitLab From 14b62b565942661f06229d7d484bca720270b898 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Wed, 15 Sep 2021 11:37:15 +0200 Subject: [PATCH 02/13] Store: minor doc fixes --- src/lib_store/cemented_block_store.ml | 2 +- src/lib_store/cemented_block_store.mli | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib_store/cemented_block_store.ml b/src/lib_store/cemented_block_store.ml index 2e3949b54758..212b90520146 100644 --- a/src/lib_store/cemented_block_store.ml +++ b/src/lib_store/cemented_block_store.ml @@ -566,7 +566,7 @@ let trigger_rolling_gc cemented_store cemented_blocks_files offset = in let cemented_files = Array.to_list cemented_blocks_files in (* Start by updating the indexes by filtering blocks that are - below the offset *) + below the offset *) Cemented_block_hash_index.filter cemented_store.cemented_block_hash_index (fun (level, _) -> Compare.Int32.(level > last_level_to_purge)) ; diff --git a/src/lib_store/cemented_block_store.mli b/src/lib_store/cemented_block_store.mli index 016e2090b791..d54650390ea3 100644 --- a/src/lib_store/cemented_block_store.mli +++ b/src/lib_store/cemented_block_store.mli @@ -221,8 +221,8 @@ val cement_blocks : unit tzresult Lwt.t (** [trigger_gc 
cemented_store history_mode] garbage collects metadata - chunks and/or chunks from the [cemented_store] depending on the - {!History_mode.t}: + chunks and/or chunks from the [cemented_store] depending on the + {!History_mode.t}: - in [Archive] mode, nothing is done; -- GitLab From 50e76e9add9169ea0356fdfd568134a16b11e316 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Wed, 15 Sep 2021 11:37:15 +0200 Subject: [PATCH 03/13] Store: fix history mode switch --- src/lib_store/block_store.ml | 73 ++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 28 deletions(-) diff --git a/src/lib_store/block_store.ml b/src/lib_store/block_store.ml index 66c9c3f5d0cd..0a210fb8ac4b 100644 --- a/src/lib_store/block_store.ml +++ b/src/lib_store/block_store.ml @@ -408,22 +408,32 @@ let expected_savepoint block_store ~target_offset = let cycle = cemented_block_files.(nb_files - target_offset) in Lwt.return cycle.start_level -(* [available_savepoint block_store expected_level] aims to check that - the [expected_level] can be used as a valid savepoint (that is to - say, contains metadata). It returns the [expected_level] if it is - valid. Returns the current savepoint otherwise .*) -let available_savepoint block_store expected_level = +(* [available_savepoint block_store current_head savepoint_candidate] + aims to check that the [savepoint_candidate] can be used as a valid + savepoint (that is to say, contains metadata). It returns the + [savepoint_candidate] block descriptor if it is valid. Returns the + current savepoint otherwise. 
*) +let available_savepoint block_store current_head savepoint_candidate = + let head_hash = Block_repr.hash current_head in savepoint block_store >>= fun current_savepoint -> - if expected_level < snd current_savepoint then - Lwt.return (snd current_savepoint) - else Lwt.return expected_level + let new_savepoint_level = + if savepoint_candidate < snd current_savepoint then snd current_savepoint + else savepoint_candidate + in + let distance = + Int32.(to_int (sub (Block_repr.level current_head) new_savepoint_level)) + in + (read_block ~read_metadata:false block_store (Block (head_hash, distance)) + >>=? function + | Some b -> return b + | None -> fail (Wrong_predecessor (head_hash, distance))) + >>=? fun block -> return (descriptor block) -(* [preserved_block block_store expected_level] returns the preserved - block if the given [expected_level] is higher that the current - preserved block. The preserved block aims to be the one needed and - maintained available to export snapshot. That is to say, the block: - lafl(head) - max_op_ttl(lafl). *) -let preserved_block block_store current_head expected_level = +(* [preserved_block block_store current_head] returns the + preserved block candidate level. The preserved block aims to be the + one needed and maintained available to export snapshot. That is to + say, the block: lafl(head) - max_op_ttl(lafl). *) +let preserved_block block_store current_head = let head_hash = Block_repr.hash current_head in read_block_metadata block_store (Block (head_hash, 0)) >|=? 
WithExceptions.Option.get ~loc:__LOC__ @@ -432,25 +442,18 @@ let preserved_block block_store current_head expected_level = let head_max_op_ttl = Int32.of_int (Block_repr.max_operations_ttl current_head_metadata) in - let block_to_preserve = Int32.(max 0l (sub head_lafl head_max_op_ttl)) in - let new_block_level = min block_to_preserve expected_level in - let distance = - Int32.(to_int (sub (Block_repr.level current_head) new_block_level)) - in - (read_block ~read_metadata:false block_store (Block (head_hash, distance)) - >>=? function - | Some b -> return b - | None -> fail (Wrong_predecessor (head_hash, distance))) - >>=? fun block -> return (descriptor block) + return Int32.(max 0l (sub head_lafl head_max_op_ttl)) (* [infer_savepoint block_store current_head ~target_offset] returns the savepoint candidate for an history mode switch. *) let infer_savepoint block_store current_head ~target_offset = expected_savepoint block_store ~target_offset >>= fun expected_savepoint_level -> - available_savepoint block_store expected_savepoint_level - >>= fun available_savepoint -> - preserved_block block_store current_head available_savepoint + preserved_block block_store current_head >>=? fun preserved_savepoint_level -> + let savepoint_candidate = + min preserved_savepoint_level expected_savepoint_level + in + available_savepoint block_store current_head savepoint_candidate (* [expected_caboose block_store ~target_offset] computes the expected caboose based on the [target_offset]). None is returned if @@ -493,7 +496,21 @@ let infer_caboose block_store savepoint current_head ~target_offset | Full _ -> ( match expected_caboose block_store ~target_offset with | Some expected_caboose -> - preserved_block block_store current_head expected_caboose + preserved_block block_store current_head >>=? 
fun preserved_caboose -> + let new_caboose_level = min expected_caboose preserved_caboose in + let head_hash = Block_repr.hash current_head in + let distance = + Int32.( + to_int (sub (Block_repr.level current_head) new_caboose_level)) + in + (read_block + ~read_metadata:false + block_store + (Block (head_hash, distance)) + >>=? function + | Some b -> return b + | None -> fail (Wrong_predecessor (head_hash, distance))) + >>=? fun block -> return (descriptor block) | None -> return savepoint) | Rolling r -> let offset = -- GitLab From 469fd6c4460df2fe63049bada1fac93f805351e4 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Wed, 15 Sep 2021 11:37:15 +0200 Subject: [PATCH 04/13] Store: fix inconsistent savepoint drag after rolling snapshot import --- src/lib_store/block_store.ml | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/src/lib_store/block_store.ml b/src/lib_store/block_store.ml index 0a210fb8ac4b..6be412ec827f 100644 --- a/src/lib_store/block_store.ml +++ b/src/lib_store/block_store.ml @@ -581,8 +581,8 @@ let switch_history_mode block_store ~current_head ~previous_history_mode (Cannot_switch_history_mode {previous_mode = previous_history_mode; next_mode = new_history_mode}) -let compute_new_savepoint block_store history_mode ~min_level_to_preserve - ~new_head ~cycles_to_cement = +let compute_new_savepoint block_store history_mode ~new_store + ~min_level_to_preserve ~new_head ~cycles_to_cement = let nb_cycles_to_cement = List.length cycles_to_cement in assert (nb_cycles_to_cement > 0) ; Stored_data.get block_store.savepoint >>= fun savepoint -> @@ -618,14 +618,31 @@ let compute_new_savepoint block_store history_mode ~min_level_to_preserve if Compare.Int32.(snd savepoint >= min_block_level) then return savepoint else let cemented_cycles_len = List.length cemented_cycles in - (* If the offset is 0, the minimum block to preserve will be - the savepoint. 
*) + (* If the offset is 0, the savepoint will be the minimum block + to preserve. *) if offset = 0 then return min_block_descr else if (* If the number of cemented cycles is not yet the offset, then the savepoint will be unchanged. *) cemented_cycles_len < offset - then return savepoint + then + (* In case of a freshly imported rolling snapshot, we may + drag the savepoint if it was not set on a cycle + start. Otherwise, the savepoint would be missing from the + store. We drag the savepoint only if it is not in the new + floating store nor in the cycles to cements. *) + let (savepoint_hash, savepoint_level) = savepoint in + let is_savepoint_in_cemented = + List.exists + (fun (l, h) -> l <= savepoint_level && savepoint_level <= h) + cycles_to_cement + in + if not is_savepoint_in_cemented then + Floating_block_store.mem new_store savepoint_hash + >>= fun is_savepoint_in_new_store -> + if not is_savepoint_in_new_store then return min_block_descr + else return savepoint + else return savepoint else (* Else we shift the savepoint by [nb_cycles_to_cement] cycles *) @@ -721,6 +738,8 @@ let update_floating_stores block_store ~history_mode ~ro_store ~rw_store final_hash max_nb_blocks_to_retrieve >>= fun lafl_predecessors -> + (* [min_level_to_preserve] is the lowest block that we want to keep + in the floating stores. 
*) let min_level_to_preserve = if List.length lafl_predecessors > 0 then Block_repr.level @@ -804,6 +823,7 @@ let update_floating_stores block_store ~history_mode ~ro_store ~rw_store compute_new_savepoint block_store history_mode + ~new_store ~min_level_to_preserve ~new_head ~cycles_to_cement -- GitLab From b4267ff5242e1ec4910fb297450112b2f8da019e Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Wed, 29 Sep 2021 11:10:32 +0200 Subject: [PATCH 05/13] Store/tests: make patch_context more modular --- src/lib_store/test/alpha_utils.ml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/lib_store/test/alpha_utils.ml b/src/lib_store/test/alpha_utils.ml index f5a046acbd06..b8266448a6df 100644 --- a/src/lib_store/test/alpha_utils.ml +++ b/src/lib_store/test/alpha_utils.ml @@ -372,7 +372,7 @@ let default_genesis_parameters = bootstrap_accounts = default_accounts; } -let default_patch_context ctxt = +let patch_context ctxt ~json = let shell = { Tezos_base.Block_header.level = 0l; @@ -385,8 +385,6 @@ let default_patch_context ctxt = context = Context_hash.zero; } in - let open Tezos_protocol_alpha_parameters in - let json = Default_parameters.json_of_parameters default_genesis_parameters in let proto_params = Data_encoding.Binary.to_bytes_exn Data_encoding.json json in @@ -397,6 +395,11 @@ let default_patch_context ctxt = | Error _ -> assert false | Ok {context; _} -> return (Shell_context.unwrap_disk_context context) +let default_patch_context ctxt = + patch_context + ctxt + ~json:(Default_parameters.json_of_parameters default_genesis_parameters) + (********* Baking *************) let nb_validation_passes = List.length Main.validation_passes -- GitLab From c4eeb2df68a80c443dde024776e88f52afa3299c Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Wed, 29 Sep 2021 11:45:44 +0200 Subject: [PATCH 06/13] Store/test: test the savepoint drag after rolling import --- src/lib_store/test/test_snapshots.ml | 134 ++++++++++++++++++++++++++- 1 file 
changed, 132 insertions(+), 2 deletions(-) diff --git a/src/lib_store/test/test_snapshots.ml b/src/lib_store/test/test_snapshots.ml index 0c549fca0ddc..1c23c110d61c 100644 --- a/src/lib_store/test/test_snapshots.ml +++ b/src/lib_store/test/test_snapshots.ml @@ -457,7 +457,7 @@ let test_rolling () = (* We don't have a way to lock two stores in the same process => force merges by setting a new head via [bake] *) let open Filename.Infix in - let snapshot_path = store_dir // "snapshot.full" in + let snapshot_path = store_dir // "snapshot.rolling" in let chain_name = Distributed_db_version.Name.of_string "test" in let dst_dir = store_dir // "imported_store" in let dst_store_dir = dst_dir // "store" in @@ -521,6 +521,135 @@ let test_rolling () = Alpha_utils.default_genesis_parameters.constants.blocks_per_cycle, test ) +(* This test aims to check that the caboose and savepoint are well + dragged when the first merge occurs, after a rolling snapshot + import on a block which is not on a cycle's bound. Indeed, in such + a scenario, the merge procedure may remove blocks bellow the lafl + without cementing them. It would result in non stored caboose + (rolling issue) and savepoint (rolling and full issue). + In this test, we need to increase the number of blocks per cycle to + avoid the max_op_ttl to hide this potential issue. The exported + block must be outside the max_op_ttl of the next checkpoint. 
*) +let test_drag_after_import () = + let constants = + Default_parameters.{constants_test with blocks_per_cycle = 256l} + in + let patch_context ctxt = + let test_parameters = + let open Tezos_protocol_alpha_parameters in + { + Default_parameters.(parameters_of_constants constants) with + bootstrap_accounts = Alpha_utils.default_accounts; + } + in + Alpha_utils.patch_context + ctxt + ~json:(Default_parameters.json_of_parameters test_parameters) + in + let test (store_dir, context_dir) store = + let chain_store = Store.main_chain_store store in + Store.Chain.genesis_block chain_store >>= fun genesis_block -> + let nb_cycles_to_bake = 2 in + Alpha_utils.bake_until_n_cycle_end + chain_store + nb_cycles_to_bake + genesis_block + >>=? fun (_blocks, head) -> + (* We don't have a way to lock two stores in the same process => + force merges by setting a new head via [bake] *) + let open Filename.Infix in + let snapshot_path = store_dir // "snapshot.rolling" in + let chain_name = Distributed_db_version.Name.of_string "test" in + let dst_dir = store_dir // "imported_store" in + let dst_store_dir = dst_dir // "store" in + let dst_context_dir = dst_dir // "context" in + (*FIXME test over Raw formats as well *) + (* export distance is higer than the 120 max_op_tt*) + let export_distance = 130 in + Store.Block.read_block + chain_store + (Store.Block.hash head) + ~distance:export_distance + >>=? fun export_block -> + let export_block_hash = Store.Block.hash export_block in + Snapshots.export + ~snapshot_path + Snapshots.Tar + ~rolling:true + ~block:(`Hash (export_block_hash, 0)) + ~store_dir + ~context_dir + ~chain_name + genesis + >>=? fun () -> + Snapshots.import + ~snapshot_path + ~dst_store_dir + ~dst_context_dir + ~chain_name + ~user_activated_upgrades:[] + ~user_activated_protocol_overrides:[] + ~block:export_block_hash + genesis + >>=? 
fun () -> + Store.init + ~patch_context + ~readonly:false + ~store_dir:dst_store_dir + ~context_dir:dst_context_dir + ~allow_testchains:true + genesis + >>=? fun store' -> + let chain_store' = Store.main_chain_store store' in + (* Finish to bake the current cycle. *) + Alpha_utils.bake_until_cycle_end chain_store' export_block + >>=? fun (_, _head) -> + Store.Chain.savepoint chain_store' + >>= fun (savepoint_hash, savepoint_level) -> + Store.Block.read_block chain_store' savepoint_hash >>=? fun savepoint -> + Store.Block.get_block_metadata chain_store' savepoint >>=? fun metadata -> + let expected_caboose = + Int32.( + sub savepoint_level (of_int (Store.Block.max_operations_ttl metadata))) + in + Store.Chain.caboose chain_store' >>= fun (_, caboose_level) -> + let prn i = Format.sprintf "%ld" i in + Assert.equal + ~prn + ~msg:__LOC__ + ~eq:Compare.Int32.equal + caboose_level + expected_caboose ; + let block_store = Store.Unsafe.get_block_store chain_store' in + let rec restart n head = + if n = 0 then return head + else + Alpha_utils.bake_until_cycle_end chain_store' head >>=? fun (_, head) -> + Block_store.await_merging block_store >>= fun () -> + Store.Chain.caboose chain_store' >>= fun (_, caboose_level) -> + Store.Chain.savepoint chain_store' >>= fun (_, savepoint_level) -> + List.iter_es + (fun level -> + Store.Block.read_block_by_level chain_store' level + >>=? fun _sucess -> return_unit) + Int32.( + List.map of_int (to_int caboose_level -- to_int savepoint_level)) + >>=? fun () -> restart (n - 1) head + in + (* With the given constants, it is required to bake 7 cycles to + trigger the first merge. *) + restart 7 export_block >>=? 
fun _h -> return_unit + in + wrap_test + ~keep_dir:false + ~history_mode:History_mode.default + ~patch_context + ( Format.asprintf + "check caboose and savepoint drag after rolling import (blocks per \ + cycle = %ld)" + constants.blocks_per_cycle, + test ) + (* TODO: export => import => export => import from full & rolling export equivalence @@ -534,6 +663,7 @@ let tests speed = Tezos_protocol_alpha_parameters.Default_parameters.( parameters_of_constants constants_sandbox) in - test_rolling () :: generated_tests + test_rolling () :: test_drag_after_import () :: generated_tests in + ("snapshots", test_cases) -- GitLab From c838836fc339c5827559b5a1b992e5714ef70b74 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Wed, 15 Sep 2021 12:16:50 +0200 Subject: [PATCH 07/13] Store: improve misleading documentation --- src/lib_store/block_store.mli | 5 +++-- src/lib_store/cemented_block_store.mli | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/lib_store/block_store.mli b/src/lib_store/block_store.mli index 47b717ca4967..ae88b4415c32 100644 --- a/src/lib_store/block_store.mli +++ b/src/lib_store/block_store.mli @@ -209,8 +209,9 @@ val mem : block_store -> key -> bool tzresult Lwt.t val get_hash : block_store -> key -> Block_hash.t option tzresult Lwt.t (** [read_block ~read_metadata block_store key] reads the block [key] - in [block_store] if present. Return [None] if the block is - unknown. *) + in [block_store] if present. Return [None] if the block is + unknown. If [read_metadata] is set to [true] it tries to retreive + the metadata but do not fail if it is not available. 
*) val read_block : read_metadata:bool -> block_store -> key -> Block_repr.t option tzresult Lwt.t diff --git a/src/lib_store/cemented_block_store.mli b/src/lib_store/cemented_block_store.mli index d54650390ea3..fa8ccf88ae5f 100644 --- a/src/lib_store/cemented_block_store.mli +++ b/src/lib_store/cemented_block_store.mli @@ -195,8 +195,8 @@ val get_highest_cemented_level : t -> int32 option (** [get_cemented_block_by_level cemented_store ~read_metadata level] reads the cemented block at [level] in [cemented_store], if it - exists. It also retrieves the metadata depending on - [read_metadata]. *) + exists. It also tries to retrieves the metadata depending on + [read_metadata] but do not fail if no metadata is available. *) val get_cemented_block_by_level : t -> read_metadata:bool -> int32 -> Block_repr.block option tzresult Lwt.t -- GitLab From a2c6d8695dd74c7f9463cf713d36821b3809ce21 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Thu, 23 Sep 2021 15:19:56 +0200 Subject: [PATCH 08/13] Shell/Store: removing redundant event --- src/lib_store/store.ml | 1 - 1 file changed, 1 deletion(-) diff --git a/src/lib_store/store.ml b/src/lib_store/store.ml index 0e957e97c990..9eece5d9bc7e 100644 --- a/src/lib_store/store.ml +++ b/src/lib_store/store.ml @@ -1077,7 +1077,6 @@ module Chain = struct | _ -> return_unit let set_head chain_store new_head = - Store_events.(emit set_head) (Block.descriptor new_head) >>= fun () -> Shared.update_with chain_store.chain_state (fun chain_state -> (* The merge cannot finish until we release the lock on the chain state so its status cannot change while this -- GitLab From 7716078c90f57d1f8f03a29ec7db34cdd2001249 Mon Sep 17 00:00:00 2001 From: vbot Date: Mon, 27 Sep 2021 11:22:43 +0200 Subject: [PATCH 09/13] Store: log the error when a merge fails --- src/lib_store/store.ml | 6 +++--- src/lib_store/store_events.ml | 9 ++++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/lib_store/store.ml b/src/lib_store/store.ml 
index 9eece5d9bc7e..49b6835c14de 100644 --- a/src/lib_store/store.ml +++ b/src/lib_store/store.ml @@ -1084,16 +1084,16 @@ module Chain = struct (* Also check the status to be extra-safe *) Block_store.status chain_store.block_store >>= fun store_status -> (match Block_store.get_merge_status chain_store.block_store with - | Merge_failed _ -> + | Merge_failed errs -> (* If the merge has failed, notify in the logs but don't trigger any merge. *) - Store_events.(emit notify_merge_error ()) >>= fun () -> + Store_events.(emit notify_merge_error errs) >>= fun () -> (* We mark the merge as on-going to prevent the merge from being triggered and to update on-disk values. *) return_true | Not_running when store_status <> Idle -> (* Degenerate case, do the same as the Merge_failed case *) - Store_events.(emit notify_merge_error ()) >>= fun () -> return_true + Store_events.(emit notify_merge_error []) >>= fun () -> return_true | Not_running -> return_false | Running -> return_true) >>=? fun is_merge_ongoing -> diff --git a/src/lib_store/store_events.ml b/src/lib_store/store_events.ml index 99fb7961f9d3..484d2fe85656 100644 --- a/src/lib_store/store_events.ml +++ b/src/lib_store/store_events.ml @@ -295,9 +295,12 @@ let merge_error = ("message", Data_encoding.string) let notify_merge_error = - declare_0 + declare_1 ~section ~level:Internal_event.Error ~name:"notify_merge_error" - ~msg:"store merge has failed, restart the node to restore the consistency" - () + ~msg: + "store merge has failed, restart the node to restore the consistency: \ + {errs}" + ~pp1:(fun ppf -> Format.fprintf ppf "%a" Error_monad.pp_print_trace) + ("errs", Error_monad.trace_encoding) -- GitLab From cea1f759f540ef6d581aba8393e207ec509c0f5d Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Wed, 29 Sep 2021 11:14:01 +0200 Subject: [PATCH 10/13] Store: make block cache configurable --- src/lib_store/block_store.ml | 15 +++++++++------ src/lib_store/block_store.mli | 17 +++++++++++------ 
src/lib_store/store.ml | 36 +++++++++++++++++++++++------------ src/lib_store/store.mli | 26 ++++++++++++++----------- 4 files changed, 59 insertions(+), 35 deletions(-) diff --git a/src/lib_store/block_store.ml b/src/lib_store/block_store.ml index 6be412ec827f..be5352ab1646 100644 --- a/src/lib_store/block_store.ml +++ b/src/lib_store/block_store.ml @@ -33,8 +33,7 @@ module Block_cache = map_maker ~replacement:LRU ~overflow:Strong ~accounting:Precise)) (Block_hash)) -(* TODO: make limits configurable *) -let block_cache_limit = 100 +let default_block_cache_limit = 100 type merge_status = Not_running | Running | Merge_failed of tztrace @@ -1222,7 +1221,7 @@ let may_recover_merge block_store = block_store.rw_floating_block_store <- rw ; write_status block_store Idle >>=? fun () -> return_unit)) -let load chain_dir ~genesis_block ~readonly = +let load ?block_cache_limit chain_dir ~genesis_block ~readonly = Cemented_block_store.init chain_dir ~readonly >>=? fun cemented_store -> Floating_block_store.init chain_dir ~readonly RO >>= fun ro_floating_block_store -> @@ -1236,7 +1235,10 @@ let load chain_dir ~genesis_block ~readonly = >>=? fun caboose -> Stored_data.init (Naming.block_store_status_file chain_dir) ~initial_data:Idle >>=? fun status_data -> - let block_cache = Block_cache.create block_cache_limit in + let block_cache = + Block_cache.create + (Option.value block_cache_limit ~default:default_block_cache_limit) + in let merge_scheduler = Lwt_idle_waiter.create () in let merge_mutex = Lwt_mutex.create () in let block_store = @@ -1262,8 +1264,9 @@ let load chain_dir ~genesis_block ~readonly = fail_unless (status = Idle) Cannot_load_degraded_store >>=? fun () -> return block_store -let create chain_dir ~genesis_block = - load chain_dir ~genesis_block ~readonly:false >>=? fun block_store -> +let create ?block_cache_limit chain_dir ~genesis_block = + load chain_dir ?block_cache_limit ~genesis_block ~readonly:false + >>=? 
fun block_store -> store_block block_store genesis_block >>=? fun () -> return block_store let pp_merge_status fmt status = diff --git a/src/lib_store/block_store.mli b/src/lib_store/block_store.mli index ae88b4415c32..9a6dcba08a37 100644 --- a/src/lib_store/block_store.mli +++ b/src/lib_store/block_store.mli @@ -314,19 +314,24 @@ val switch_history_mode : new_history_mode:History_mode.t -> unit tzresult Lwt.t -(** [create ~chain_dir ~genesis_block] instantiates a fresh - [block_store] in directory [chain_dir] and stores the +(** [create ?block_cache_limit ~chain_dir ~genesis_block] instantiates + a fresh [block_store] in directory [chain_dir] and stores the [genesis_block] in it. It fails if the given [chain_dir] is already - populated.*) + populated. Setting the [block_cache_limit] allows to override the + default block cache size. *) val create : + ?block_cache_limit:int -> [`Chain_dir] Naming.directory -> genesis_block:Block_repr.t -> block_store tzresult Lwt.t -(** [load chain_dir ~genesis_block ~readonly] loads an existing - block_store from directory [chain_dir]. Setting [readonly] will - prevent new blocks from being stored. *) +(** [load ?block_cache_limit chain_dir ~genesis_block ~readonly] loads + an existing block_store from directory [chain_dir]. Setting + [readonly] will prevent new blocks from being stored. Setting the + [block_cache_limit] allows to override the default block cache + size. 
*) val load : + ?block_cache_limit:int -> [`Chain_dir] Naming.directory -> genesis_block:Block_repr.t -> readonly:bool -> diff --git a/src/lib_store/store.ml b/src/lib_store/store.ml index 49b6835c14de..99f01a274759 100644 --- a/src/lib_store/store.ml +++ b/src/lib_store/store.ml @@ -1677,9 +1677,9 @@ module Chain = struct | Ok v -> Lwt.return_some v | Error _ -> Lwt.return_none - let create_chain_store global_store chain_dir ?target ~chain_id - ?(expiration = None) ?genesis_block ~genesis ~genesis_context history_mode - = + let create_chain_store ?block_cache_limit global_store chain_dir ?target + ~chain_id ?(expiration = None) ?genesis_block ~genesis ~genesis_context + history_mode = (* Chain directory *) let genesis_block = match genesis_block with @@ -1687,7 +1687,8 @@ module Chain = struct | Some genesis_block -> genesis_block in (* Block_store.create also stores genesis *) - Block_store.create chain_dir ~genesis_block >>=? fun block_store -> + Block_store.create ?block_cache_limit chain_dir ~genesis_block + >>=? fun block_store -> let chain_config = {history_mode; genesis; expiration} in Stored_data.write_file (Naming.chain_config_file chain_dir) chain_config >>=? fun () -> @@ -1724,14 +1725,16 @@ module Chain = struct in return chain_store - let load_chain_store global_store chain_dir ~chain_id ~readonly = + let load_chain_store ?block_cache_limit global_store chain_dir ~chain_id + ~readonly = Stored_data.load (Naming.chain_config_file chain_dir) >>=? fun chain_config_data -> Stored_data.get chain_config_data >>= fun chain_config -> Stored_data.load (Naming.genesis_block_file chain_dir) >>=? fun genesis_block_data -> Stored_data.get genesis_block_data >>= fun genesis_block -> - Block_store.load chain_dir ~genesis_block ~readonly >>=? fun block_store -> + Block_store.load ?block_cache_limit chain_dir ~genesis_block ~readonly + >>=? fun block_store -> load_chain_state chain_dir block_store >>=? 
fun chain_state -> let chain_state = Shared.create chain_state in let block_watcher = Lwt_watcher.create_input () in @@ -2080,8 +2083,9 @@ module Protocol = struct Lwt_watcher.create_stream protocol_watcher end -let create_store ~context_index ~chain_id ~genesis ~genesis_context - ?(history_mode = History_mode.default) ~allow_testchains store_dir = +let create_store ?block_cache_limit ~context_index ~chain_id ~genesis + ~genesis_context ?(history_mode = History_mode.default) ~allow_testchains + store_dir = let store_dir_path = Naming.dir_path store_dir in Lwt_utils_unix.create_dir store_dir_path >>= fun () -> Protocol_store.init store_dir >>= fun protocol_store -> @@ -2100,6 +2104,7 @@ let create_store ~context_index ~chain_id ~genesis ~genesis_context } in Chain.create_chain_store + ?block_cache_limit global_store chain_dir ~chain_id @@ -2111,8 +2116,8 @@ let create_store ~context_index ~chain_id ~genesis ~genesis_context global_store.main_chain_store <- Some main_chain_store ; return global_store -let load_store ?history_mode store_dir ~context_index ~genesis ~chain_id - ~allow_testchains ~readonly () = +let load_store ?history_mode ?block_cache_limit store_dir ~context_index + ~genesis ~chain_id ~allow_testchains ~readonly () = let chain_dir = Naming.chain_dir store_dir chain_id in protect (fun () -> @@ -2148,7 +2153,12 @@ let load_store ?history_mode store_dir ~context_index ~genesis ~chain_id global_block_watcher; } in - Chain.load_chain_store global_store chain_dir ~chain_id ~readonly + Chain.load_chain_store + ?block_cache_limit + global_store + chain_dir + ~chain_id + ~readonly >>=? 
fun main_chain_store -> let stored_genesis = Chain.genesis main_chain_store in fail_unless @@ -2172,7 +2182,7 @@ let main_chain_store store = WithExceptions.Option.get ~loc:__LOC__ store.main_chain_store let init ?patch_context ?commit_genesis ?history_mode ?(readonly = false) - ~store_dir ~context_dir ~allow_testchains genesis = + ?block_cache_limit ~store_dir ~context_dir ~allow_testchains genesis = let store_dir = Naming.store_dir ~dir_path:store_dir in let chain_id = Chain_id.of_block_hash genesis.Genesis.block in (match commit_genesis with @@ -2197,6 +2207,7 @@ let init ?patch_context ?commit_genesis ?history_mode ?(readonly = false) if Sys.file_exists chain_dir_path && Sys.is_directory chain_dir_path then load_store ?history_mode + ?block_cache_limit store_dir ~context_index ~genesis @@ -2208,6 +2219,7 @@ let init ?patch_context ?commit_genesis ?history_mode ?(readonly = false) (* Fresh store *) commit_genesis ~chain_id >>=? fun genesis_context -> create_store + ?block_cache_limit store_dir ~context_index ~chain_id diff --git a/src/lib_store/store.mli b/src/lib_store/store.mli index 91306dffe2e9..b1dce520dd47 100644 --- a/src/lib_store/store.mli +++ b/src/lib_store/store.mli @@ -174,17 +174,17 @@ type chain_store (** {3 Initialization} *) -(** [init ?patch_context ?commit_genesis ?history_mode ~store_dir - ~context_dir ~allow_testchains genesis] initializes the store and - a main chain store. If [store_dir] (resp. [context_dir]) does not - exist, a fresh store (resp. context) is created. Otherwise, it - loads the store (resp. context) from reading the adequate - directory. If [allow_testchains] is passed, the store will be able - to fork chains and instantiate testchain's sub chain stores, for - all chains contained in the store. The chain store created is - based on the [genesis] provided. Its chain identifier will be - computed using the {!Tezos_crypto.Chain_id.of_block_hash} - function. 
+(** [init ?patch_context ?commit_genesis ?history_mode + ?block_cache_limit ~store_dir ~context_dir ~allow_testchains + genesis] initializes the store and a main chain store. If + [store_dir] (resp. [context_dir]) does not exist, a fresh store + (resp. context) is created. Otherwise, it loads the store + (resp. context) from reading the adequate directory. If + [allow_testchains] is passed, the store will be able to fork + chains and instantiate testchain's sub chain stores, for all + chains contained in the store. The chain store created is based on + the [genesis] provided. Its chain identifier will be computed + using the {!Tezos_crypto.Chain_id.of_block_hash} function. @param patch_context the handle called when initializing the context. It usually is passed when creating a sandboxed chain. @@ -202,6 +202,9 @@ type chain_store Default: {!History_mode.default} (which should correspond to full with 5 extra preserved cycles.) + @param block_cache_limit allows to override the size of the block + cache to use. The minimal value is 1. + @param readonly a flag that, if set to true, prevent writing throughout the store {b and} context. 
Default: false @@ -211,6 +214,7 @@ val init : ?commit_genesis:(chain_id:Chain_id.t -> Context_hash.t tzresult Lwt.t) -> ?history_mode:History_mode.t -> ?readonly:bool -> + ?block_cache_limit:int -> store_dir:string -> context_dir:string -> allow_testchains:bool -> -- GitLab From 2ab7aaa47106f1cdb1ebd79dfd14b547207bc4a0 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Thu, 30 Sep 2021 15:55:13 +0200 Subject: [PATCH 11/13] Store: check chain_config at consistency check --- src/lib_store/consistency.ml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/lib_store/consistency.ml b/src/lib_store/consistency.ml index 65dffde3443a..6de1dd841ebb 100644 --- a/src/lib_store/consistency.ml +++ b/src/lib_store/consistency.ml @@ -136,6 +136,8 @@ let check_consistency chain_dir genesis = (Inconsistent_genesis {expected = genesis.block; got = Block_repr.hash genesis_block}) >>=? fun () -> + Stored_data.load (Naming.chain_config_file chain_dir) + >>=? fun _chain_config -> Stored_data.load (Naming.caboose_file chain_dir) >>=? fun caboose_data -> Stored_data.get caboose_data >>= fun caboose -> Stored_data.load (Naming.savepoint_file chain_dir) >>=? 
fun savepoint_data -> -- GitLab From 570a0fae4bf4a38452b31174b987fc439d58edbf Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Thu, 30 Sep 2021 15:34:02 +0200 Subject: [PATCH 12/13] Store: infer history mode using config when restoring consistency --- src/lib_store/consistency.ml | 44 ++++++++++++++++++++++++++--------- src/lib_store/consistency.mli | 9 +++---- src/lib_store/store.ml | 6 ++++- src/lib_store/store_events.ml | 24 +++++++++++++++++++ 4 files changed, 67 insertions(+), 16 deletions(-) diff --git a/src/lib_store/consistency.ml b/src/lib_store/consistency.ml index 6de1dd841ebb..b4f0ea69346b 100644 --- a/src/lib_store/consistency.ml +++ b/src/lib_store/consistency.ml @@ -838,11 +838,11 @@ let fix_protocol_levels context_index block_store genesis genesis_header ~head (* [fix_chain_state ~chain_dir ~head ~cementing_highwatermark ~checkpoint ~savepoint ~caboose ~alternate_heads ~forked_chains - ~protocol_levels ~genesis ~genesis_context] writes, as + ~protocol_levels ~chain_config ~genesis ~genesis_context] writes, as [Stored_data.t], the given arguments. *) let fix_chain_state chain_dir ~head ~cementing_highwatermark ~checkpoint ~savepoint ~caboose ~alternate_heads ~forked_chains ~protocol_levels - ~genesis ~genesis_context = + ~chain_config ~genesis ~genesis_context = (* By setting each stored data, we erase the previous content. *) let rec init_protocol_table protocol_table = function | [] -> protocol_table @@ -855,6 +855,8 @@ let fix_chain_state chain_dir ~head ~cementing_highwatermark ~checkpoint let protocol_table = init_protocol_table Protocol_levels.empty protocol_levels in + Stored_data.write_file (Naming.chain_config_file chain_dir) chain_config + >>=? fun () -> Stored_data.write_file (Naming.protocol_levels_file chain_dir) protocol_table >>=? fun () -> let genesis_block = @@ -882,9 +884,8 @@ let fix_chain_state chain_dir ~head ~cementing_highwatermark ~checkpoint Stored_data.write_file (Naming.forked_chains_file chain_dir) forked_chains >>=? 
fun () -> return_unit -(* [fix_chain_config ~chain_dir block_store genesis caboose savepoint] - infers the history mode and update the [chain_config]. *) -let fix_chain_config chain_dir block_store genesis caboose savepoint = +(* Infers the history mode by inspecting the state of the store. *) +let infer_history_mode chain_dir block_store genesis caboose savepoint = let cemented_block_store = Block_store.cemented_block_store block_store in let cemented_blocks_files = match Cemented_block_store.cemented_blocks_files cemented_block_store with @@ -933,8 +934,28 @@ let fix_chain_config chain_dir block_store genesis caboose savepoint = full or rolling. We choose full as the less destructive. *) Full offset in - let chain_config = {history_mode; genesis; expiration = None} in - Stored_data.write_file (Naming.chain_config_file chain_dir) chain_config + Store_events.(emit restore_infered_history_mode history_mode) >>= fun () -> + return {history_mode; genesis; expiration = None} + +(* [fix_chain_config ?history_mode ~chain_dir block_store genesis + caboose savepoint] infers the history mode. *) +let fix_chain_config ?history_mode chain_dir block_store genesis caboose + savepoint = + Stored_data.load (Naming.chain_config_file chain_dir) >>= function + | Ok chain_config -> + (* If the store's config is available, we use it as is. *) + Stored_data.get chain_config >>= return + | Error _ -> ( + match history_mode with + (* Otherwise, we try to get the history mode that was given by + the command line or the config file. *) + | Some history_mode -> + Store_events.(emit restore_history_mode history_mode) >>= fun () -> + return {history_mode; genesis; expiration = None} + | None -> + (* If there is no hint in the config file nor the command + line, we try to infer the history mode. 
*) + infer_history_mode chain_dir block_store genesis caboose savepoint) let fix_cementing_highwatermark block_store = let cemented_block_store = Block_store.cemented_block_store block_store in @@ -944,7 +965,7 @@ let fix_cementing_highwatermark block_store = Store_events.(emit fix_cementing_highwatermark cementing_highwatermark) >>= fun () -> Lwt.return cementing_highwatermark -(* [fix_consistency store_dir context_index] +(* [fix_consistency ?history_mode store_dir context_index] aims to fix a store in an inconsistent state. The fixing steps are: - the current head is set as the highest block level found in the floating stores, @@ -959,7 +980,7 @@ let fix_cementing_highwatermark block_store = Assumptions: - context is valid and available - block store is valid and available *) -let fix_consistency chain_dir context_index genesis = +let fix_consistency ?history_mode chain_dir context_index genesis = Store_events.(emit fix_store ()) >>= fun () -> (* We suppose that the genesis block is accessible *) trace @@ -977,8 +998,8 @@ let fix_consistency chain_dir context_index genesis = fix_savepoint_and_caboose chain_dir block_store head >>=? fun (savepoint, caboose) -> fix_checkpoint chain_dir block_store head >>=? fun checkpoint -> - fix_chain_config chain_dir block_store genesis caboose savepoint - >>=? fun () -> + fix_chain_config ?history_mode chain_dir block_store genesis caboose savepoint + >>=? fun chain_config -> fix_protocol_levels context_index block_store @@ -997,6 +1018,7 @@ let fix_consistency chain_dir context_index genesis = ~alternate_heads:[] ~forked_chains:Chain_id.Map.empty ~protocol_levels + ~chain_config ~genesis ~genesis_context:(Block_repr.context genesis_block) >>=? 
fun () ->
diff --git a/src/lib_store/consistency.mli b/src/lib_store/consistency.mli
index 3f566b60d285..acb8f6de9b6b 100644
--- a/src/lib_store/consistency.mli
+++ b/src/lib_store/consistency.mli
@@ -28,11 +28,12 @@
 val check_consistency :
   [`Chain_dir] Naming.directory -> Genesis.t -> unit tzresult Lwt.t
 
-(** [fix_consistency store_dir context_index ~genesis] aims to fix a
-    store if it is in an inconsistent state. To do so, it will inspect
-    the stored data and try infer all the missing or corrupted
-    parts. *)
+(** [fix_consistency ?history_mode store_dir context_index genesis]
+    aims to fix a store if it is in an inconsistent state. To do so, it
+    will inspect the stored data and try to infer all the missing or
+    corrupted parts. *)
 val fix_consistency :
+  ?history_mode:History_mode.t ->
   [`Chain_dir] Naming.directory ->
   Context.index ->
   Genesis.t ->
diff --git a/src/lib_store/store.ml b/src/lib_store/store.ml
index 99f01a274759..28b9db542182 100644
--- a/src/lib_store/store.ml
+++ b/src/lib_store/store.ml
@@ -2135,7 +2135,11 @@ let load_store ?history_mode ?block_cache_limit store_dir ~context_index
           Lwt.return_error err
       | err ->
           Store_events.(emit inconsistent_store err) >>= fun () ->
-          Consistency.fix_consistency chain_dir context_index genesis
+          Consistency.fix_consistency
+            chain_dir
+            context_index
+            genesis
+            ?history_mode
          >>=? fun () ->
          Store_events.(emit store_was_fixed ()) >>= fun () -> return_unit)
  >>=?
fun () ->
diff --git a/src/lib_store/store_events.ml b/src/lib_store/store_events.ml
index 484d2fe85656..2648236a7520 100644
--- a/src/lib_store/store_events.ml
+++ b/src/lib_store/store_events.ml
@@ -268,6 +268,30 @@ let update_protocol_table =
     ("block_level", Data_encoding.int32)
     ~pp4:pp_int32
 
+let restore_history_mode =
+  declare_1
+    ~section
+    ~level:Internal_event.Notice
+    ~name:"restore_history_mode"
+    ~msg:
+      "history mode was successfully restored to {history_mode}, based on the \
+       configuration file or command line argument"
+    ("history_mode", History_mode.encoding)
+    ~pp1:History_mode.pp
+
+let restore_infered_history_mode =
+  declare_1
+    ~section
+    ~level:Internal_event.Notice
+    ~name:"restore_infered_history_mode"
+    ~msg:
+      "history mode was successfully restored to {history_mode}. Warning: this \
+       history mode may differ from the one preceding the restore procedure \
+       and you may need to restart the node to explicitly force the history \
+       mode switch"
+    ("history_mode", History_mode.encoding)
+    ~pp1:History_mode.pp
+
 (* Warning *)
 let warning_incomplete_storage =
   declare_1
-- 
GitLab

From 2ac7b6fc315dc4302094d65de28c1931bd7adf9b Mon Sep 17 00:00:00 2001
From: Victor Allombert
Date: Wed, 15 Sep 2021 15:39:57 +0200
Subject: [PATCH 13/13] Changelog: store fixes

---
 CHANGES.rst | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/CHANGES.rst b/CHANGES.rst
index b1c211c17904..b32988c57a8b 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -169,6 +169,14 @@ Node
   the associations between a protocol and its activation block was
   not well updated.
 
+- Fixed wrong behaviour when updating the additional cycles of the
+  node's history mode.
+
+- Removed redundant event while setting a new head.
+
+- Fixed wrong behaviour when merging the store after a rolling
+  snapshot import.
+
 Client
 ------
-- 
GitLab