From beeee005241845b907df64e4e7ceb3385ae65b85 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Fri, 10 Dec 2021 08:57:17 +0100 Subject: [PATCH 1/6] Store: remove legacy store and associated tests --- src/lib_store/dune | 2 - src/lib_store/legacy.ml | 882 -------- src/lib_store/legacy.mli | 93 - src/lib_store/legacy_store/.ocamlformat | 17 - src/lib_store/legacy_store/README.md | 10 - src/lib_store/legacy_store/dune | 47 - src/lib_store/legacy_store/dune-project | 3 - src/lib_store/legacy_store/legacy_chain.ml | 140 -- src/lib_store/legacy_store/legacy_chain.mli | 76 - .../legacy_store/legacy_chain_traversal.ml | 170 -- .../legacy_store/legacy_chain_traversal.mli | 69 - .../legacy_store/legacy_snapshots.ml | 1010 --------- .../legacy_store/legacy_snapshots.mli | 51 - src/lib_store/legacy_store/legacy_state.ml | 1892 ----------------- src/lib_store/legacy_store/legacy_state.mli | 457 ---- src/lib_store/legacy_store/legacy_store.ml | 469 ---- src/lib_store/legacy_store/legacy_store.mli | 237 --- .../legacy_store/legacy_store_builder.ml | 349 --- src/lib_store/legacy_store/raw_store.ml | 314 --- src/lib_store/legacy_store/raw_store.mli | 40 - src/lib_store/legacy_store/store_helpers.ml | 470 ---- src/lib_store/legacy_store/store_helpers.mli | 63 - src/lib_store/legacy_store/store_sigs.ml | 204 -- src/lib_store/legacy_store/test/.ocamlformat | 17 - src/lib_store/legacy_store/test/assert.ml | 85 - src/lib_store/legacy_store/test/dune | 19 - src/lib_store/legacy_store/test/test.ml | 31 - .../legacy_store/test/test_raw_store.ml | 94 - .../legacy_store/tezos-legacy-store.opam | 26 - src/lib_store/store.ml | 211 -- src/lib_store/store.mli | 23 - src/lib_store/test/dune | 29 +- src/lib_store/test/legacy_store_maker.ml | 112 - src/lib_store/test/legacy_utils.ml | 458 ---- src/lib_store/test/test_legacy.ml | 667 ------ src/lib_store/tezos-store.opam | 1 - tests_python/conftest.py | 80 - tests_python/tests_011/conftest.py | 36 - .../tests_011/test_legacy_snapshots.py | 353 --- tests_python/tests_011/test_legacy_upgrade.py | 212 -- tests_python/tests_alpha/conftest.py | 36 - .../tests_alpha/test_legacy_snapshots.py | 353 --- .../tests_alpha/test_legacy_upgrade.py | 212 -- 43 files changed, 2 insertions(+), 10118 deletions(-) delete mode 100644 src/lib_store/legacy.ml delete mode 100644 src/lib_store/legacy.mli delete mode 100644 src/lib_store/legacy_store/.ocamlformat delete mode 100644 src/lib_store/legacy_store/README.md delete mode 100644 src/lib_store/legacy_store/dune delete mode 100644 src/lib_store/legacy_store/dune-project delete mode 100644 src/lib_store/legacy_store/legacy_chain.ml delete mode 100644 src/lib_store/legacy_store/legacy_chain.mli delete mode 100644 src/lib_store/legacy_store/legacy_chain_traversal.ml delete mode 100644 src/lib_store/legacy_store/legacy_chain_traversal.mli delete mode 100644 src/lib_store/legacy_store/legacy_snapshots.ml delete mode 100644 src/lib_store/legacy_store/legacy_snapshots.mli delete mode 100644 src/lib_store/legacy_store/legacy_state.ml delete mode 100644 src/lib_store/legacy_store/legacy_state.mli delete mode 100644 src/lib_store/legacy_store/legacy_store.ml delete mode 100644 src/lib_store/legacy_store/legacy_store.mli delete mode 100644 src/lib_store/legacy_store/legacy_store_builder.ml delete mode 100644 src/lib_store/legacy_store/raw_store.ml delete mode 100644 src/lib_store/legacy_store/raw_store.mli delete mode 100644 src/lib_store/legacy_store/store_helpers.ml delete mode 100644 src/lib_store/legacy_store/store_helpers.mli delete mode 100644 
src/lib_store/legacy_store/store_sigs.ml delete mode 100644 src/lib_store/legacy_store/test/.ocamlformat delete mode 100644 src/lib_store/legacy_store/test/assert.ml delete mode 100644 src/lib_store/legacy_store/test/dune delete mode 100644 src/lib_store/legacy_store/test/test.ml delete mode 100644 src/lib_store/legacy_store/test/test_raw_store.ml delete mode 100644 src/lib_store/legacy_store/tezos-legacy-store.opam delete mode 100644 src/lib_store/test/legacy_store_maker.ml delete mode 100644 src/lib_store/test/legacy_utils.ml delete mode 100644 src/lib_store/test/test_legacy.ml delete mode 100644 tests_python/tests_011/test_legacy_snapshots.py delete mode 100644 tests_python/tests_011/test_legacy_upgrade.py delete mode 100644 tests_python/tests_alpha/test_legacy_snapshots.py delete mode 100644 tests_python/tests_alpha/test_legacy_upgrade.py diff --git a/src/lib_store/dune b/src/lib_store/dune index aa7b66f90b09..1923652fcb76 100644 --- a/src/lib_store/dune +++ b/src/lib_store/dune @@ -9,7 +9,6 @@ irmin-pack tezos-stdlib-unix tezos-context - tezos-legacy-store tezos-validation tezos-protocol-updater lwt-watcher @@ -23,7 +22,6 @@ -open Tezos_base -open Tezos_base__TzPervasives -open Tezos_context - -open Tezos_legacy_store -open Tezos_validation -open Tezos_protocol_updater -open Tezos_stdlib_unix diff --git a/src/lib_store/legacy.ml b/src/lib_store/legacy.ml deleted file mode 100644 index 3707db6ab939..000000000000 --- a/src/lib_store/legacy.ml +++ /dev/null @@ -1,882 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2020-2021 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. 
*) -(* *) -(*****************************************************************************) - -open Filename.Infix - -(* Hardcoded networks data *) -module Hardcoded = struct - type network = {name : Distributed_db_version.Name.t; cycle_length : int} - - let proj (name, cycle_length) = - {name = Distributed_db_version.Name.of_string name; cycle_length} - - (* Hardcoded cycle length *) - let supported_networks = - List.map - proj - [ - ("TEZOS_MAINNET", 4096); - ("TEZOS_EDO2NET_2021-02-11T14:00:00Z", 2048); - ("TEZOS_FLORENCENOBANET_2021-03-04T20:00:00Z", 2048); - ("TEZOS_GRANADANET_2021-05-21T15:00:00Z", 8192); - ("TEZOS", 8); - ] - - let cycle_length ~chain_name = - List.find_map - (fun {name; cycle_length} -> - if chain_name = name then Some cycle_length else None) - supported_networks - |> WithExceptions.Option.get ~loc:__LOC__ - - let check_network ~chain_name = - if not (List.exists (fun {name; _} -> chain_name = name) supported_networks) - then - failwith - "Cannot perform operation for chain_name %a. Only %a are supported." - Distributed_db_version.Name.pp - chain_name - (Format.pp_print_list - ~pp_sep:(fun ppf () -> Format.fprintf ppf ", ") - (fun ppf {name; _} -> - Format.fprintf ppf "%a" Distributed_db_version.Name.pp name)) - supported_networks - else return_unit - - (* Tells whether setting the checkpoint requires cementing blocks *) - let may_update_checkpoint ~cycle_length nb_blocks = nb_blocks = cycle_length -end - -(* Legacy store conversion *) - -type error += - | Failed_to_convert_protocol of Protocol_hash.t - | (* TODO: Better way to handle errors ? *) Failed_to_upgrade of string - -let () = - register_error_kind - `Permanent - ~id:"legacy.failed_to_convert_protocol" - ~title:"Failed to convert protocol" - ~description:"Failed to convert protocol from legacy store." - ~pp:(fun ppf -> - Format.fprintf - ppf - "Failed to convert protocol %a from legacy store." - Protocol_hash.pp) - Data_encoding.(obj1 (req "protocol_hash" Protocol_hash.encoding)) - (function Failed_to_convert_protocol p -> Some p | _ -> None) - (fun p -> Failed_to_convert_protocol p) ; - register_error_kind - `Permanent - ~id:"legacy.failed_to_upgrade" - ~title:"Failed to upgrade" - ~description:"Failed to upgrade the store."
- ~pp:(fun ppf -> Format.fprintf ppf "Failed to upgrade the store: %s.") - Data_encoding.(obj1 (req "msg" string)) - (function Failed_to_upgrade s -> Some s | _ -> None) - (fun s -> Failed_to_upgrade s) - -module Event = struct - include Internal_event.Simple - - let section = ["node"; "legacy"; "upgrade"] - - let level = Internal_event.Notice - - let restoring_after_failure = - Internal_event.Simple.declare_1 - ~level - ~section - ~name:"restoring_after_failure" - ~msg: - "cleaning directory {directory} because of failure: restoring the \ former store" - ~pp1:Format.pp_print_string - ("directory", Data_encoding.string) - - let advertise_upgrade_mode = - Internal_event.Simple.declare_2 - ~level - ~section - ~name:"advertise_upgrade_mode" - ~msg:"upgrading storage from '{old_hm}' to '{new_hm}'" - ~pp1:Format.pp_print_string - ("old_hm", Data_encoding.string) - ~pp2:Format.pp_print_string - ("new_hm", Data_encoding.string) - - let upgrade_completed = - Internal_event.Simple.declare_1 - ~level - ~section - ~name:"upgrade_completed" - ~msg: - "upgrade completed - you may now safely remove the former storage \ located at '{path}'" - ~pp1:Format.pp_print_string - ("path", Data_encoding.string) -end - -let hash_header legacy_chain_store header = - let is_genesis = Compare.Int32.(header.Block_header.shell.level = 0l) in - if is_genesis then Legacy_store.Chain.Genesis_hash.read legacy_chain_store - else return (Block_header.hash header) - -type with_metadata = Required | Unwanted | Optional - -(* Build a Store.Block.block_repr from reading the data from a legacy - store. [with_metadata] determines the presence of metadata based on - the following rules: - - Required: output block must contain metadata. - - Unwanted: output block must not contain metadata. The block can - be deprived of its metadata. - - Optional: output block may contain metadata. The block is returned - as is. *) -let make_block_repr ~with_metadata legacy_chain_state hash = - Legacy_state.Block.read legacy_chain_state hash >>=? fun block -> - Legacy_state.Block.all_operations block >>= fun operations -> - Legacy_state.Block.metadata_hash block >>= fun block_metadata_hash -> - Legacy_state.Block.all_operations_metadata_hashes block - >>= fun operations_metadata_hashes -> - let contents = - ({ - header = block.header; - operations; - block_metadata_hash; - operations_metadata_hashes; - } - : Block_repr.contents) - in - Legacy_state.Block.read_contents_opt block >>= fun stored_metadata -> - match (stored_metadata, with_metadata) with - | (None, Required) -> fail (Legacy_state.Block_contents_not_found block.hash) - | (None, Unwanted) | (None, Optional) -> - let metadata = None in - return ({hash; contents; metadata} : Block_repr.t) - | (Some m, Required) | (Some m, Optional) -> - Legacy_state.Block.all_operations_metadata block - >>= fun operations_metadata -> - let metadata = - Some - ({ - message = m.message; - max_operations_ttl = m.max_operations_ttl; - last_allowed_fork_level = m.last_allowed_fork_level; - block_metadata = m.metadata; - operations_metadata; - } - : Block_repr.metadata) - in - return ({hash; contents; metadata} : Block_repr.t) - | (Some _, Unwanted) -> - let metadata = None in - return ({hash; contents; metadata} : Block_repr.t) - -(* Updates the protocol table. Inserts an entry when the proto_level of - [prev_block] differs from [proto_level] (which is the protocol - level of the successor of [prev_block]).
*) -(* TODO recheck this function *) -let may_update_protocol_table legacy_chain_store chain_store ~prev_block ~block - = - let proto_level = Block_repr.proto_level block in - let block_level = Block_repr.level block in - if proto_level <> Block_repr.proto_level prev_block then ( - Legacy_store.Chain.Protocol_info.bindings legacy_chain_store - >>= fun protocol_table -> - let (proto_hash, transition_level) : Legacy_store.Chain.Protocol_info.value - = - List.assoc ~equal:Int.equal proto_level protocol_table - |> WithExceptions.Option.get ~loc:__LOC__ - in - assert (Int32.equal transition_level block_level) ; - Store.Chain.may_update_protocol_level - chain_store - ~pred:(Store.Unsafe.block_of_repr prev_block) - ~protocol_level:proto_level - (Store.Unsafe.block_of_repr block, proto_hash)) - else return_unit - -(* Reads a block at a given level [i] from a known [block_hash]. The - target level [i] must be that of a predecessor of [block_hash]. *) -let read_i legacy_chain_state block_hash i = - Legacy_state.Block.read legacy_chain_state block_hash >>=? fun block -> - let pred = Int32.(to_int (sub block.header.shell.level i)) in - Legacy_state.Block.read_predecessor legacy_chain_state ~pred block_hash - >>= function - | Some {header; _} -> return header - | None -> failwith "Failed to find block at level %ld" i - -(* Reads, from the legacy lmdb store, the blocks from [block_hash] to - [limit] and stores them in the floating store. The ~with_metadata - flag provides information on how to store the floating blocks: with - or without metadata. *) -let import_floating legacy_chain_state legacy_chain_store chain_store - ~with_metadata end_hash start_level = - let block_store = Store.Unsafe.get_block_store chain_store in - read_i legacy_chain_state end_hash start_level >>=? fun start_header -> - hash_header legacy_chain_store start_header >>=? fun start_hash -> - make_block_repr ~with_metadata legacy_chain_state start_hash - >>=? fun start_block -> - (make_block_repr - ~with_metadata - legacy_chain_state - start_header.shell.predecessor - >>= function - | Ok b -> Lwt.return_some b - | _ -> Lwt.return_none) - >>= fun pred_block_opt -> - make_block_repr ~with_metadata legacy_chain_state end_hash - >>=? fun end_block -> - let end_limit = Block_repr.level end_block in - let nb_floating_blocks = - Int32.(to_int (succ (sub (Block_repr.level end_block) start_level))) - in - Animation.display_progress - ~pp_print_step:(fun fmt i -> - Format.fprintf - fmt - "Converting floating blocks: %d/%d" - i - nb_floating_blocks) - (fun notify -> - let rec aux ~pred_block block = - Block_store.store_block block_store block >>=? fun () -> - let level = Block_repr.level block in - if level >= end_limit then notify () >>= fun () -> return_unit - else - (* At protocol change, update the protocol_table *) - (match pred_block with - | None -> return_unit (* FIXME: should be assert false? *) - | Some prev_block -> - may_update_protocol_table - legacy_chain_store - chain_store - ~prev_block - ~block) - >>=? fun () -> - read_i legacy_chain_state end_hash (Int32.succ level) - >>=? fun next_block -> - hash_header legacy_chain_store next_block >>=? fun next_block_hash -> - make_block_repr ~with_metadata legacy_chain_state next_block_hash - >>=?
fun next_block_repr -> - notify () >>= fun () -> aux ~pred_block:(Some block) next_block_repr - in - aux ~pred_block:pred_block_opt start_block) - -(* Reads, from the legacy lmdb store, the blocks from [start_block] to - [end_limit] and stores them in the cemented block store. *) -let import_cemented legacy_chain_state legacy_chain_store chain_store - cycle_length ~with_metadata ~start_block ~end_limit = - let nb_cemented_cycles = - let cycles = - Int32.(to_int (sub (Block_repr.level start_block) end_limit)) - / cycle_length - in - if end_limit = 1l then succ cycles else cycles - in - let write_metadata = - match with_metadata with Required -> true | Unwanted | Optional -> false - in - let display_msg = if write_metadata then " (with metadata)" else "" in - Animation.display_progress - ~pp_print_step:(fun fmt i -> - Format.fprintf - fmt - "Converting cemented blocks%s: %d/%d" - display_msg - i - nb_cemented_cycles) - (fun notify -> - let rec aux ~prev_block (last_checkpoint : Block_header.t) acc hash = - make_block_repr ~with_metadata legacy_chain_state hash >>=? fun block -> - (* At protocol change, update the protocol_table. Arguments - are inverted as the blocks to cement are browsed from high - to low. *) - may_update_protocol_table - legacy_chain_store - chain_store - ~prev_block:block - ~block:prev_block - >>=? fun () -> - let new_acc = block :: acc in - if Block_repr.level block <= end_limit then - (* The low limit of the cemented blocks to import was - reached. *) - if end_limit = 1l then - (* This case corresponds to networks with [0;1] as first - cycle. *) - (* Reading genesis which is not pruned in lmdb *) - make_block_repr - ~with_metadata:Unwanted - legacy_chain_state - (Block_repr.predecessor block) - >>=? fun genesis -> - may_update_protocol_table - legacy_chain_store - chain_store - ~prev_block:genesis - ~block - >>=? fun () -> - let block_store = Store.Unsafe.get_block_store chain_store in - let blocks = - match List.hd new_acc with - | Some br when Block_repr.level br = 0l -> new_acc - | Some _ -> genesis :: new_acc - | None -> assert false - in - Block_store.cement_blocks - ~check_consistency:false - block_store - blocks - ~write_metadata - >>=? fun () -> - notify () >>= fun () -> return_unit - else ( - assert (List.length acc = 0) ; - return_unit) - else if - Hardcoded.may_update_checkpoint ~cycle_length (List.length new_acc) - then - (* The end of a hardcoded cycle was reached. We need to - cement the chunk. *) - let block_store = Store.Unsafe.get_block_store chain_store in - Block_store.cement_blocks - ~check_consistency:false - block_store - new_acc - ~write_metadata - >>=? fun () -> - notify () >>= fun () -> - aux - ~prev_block:block - (Block_repr.header block) - [] - (Block_repr.predecessor block) - else - aux - ~prev_block:block - last_checkpoint - (block :: acc) - (Block_repr.predecessor block) - in - aux - ~prev_block:start_block - (Block_repr.header start_block) - [start_block] - (Block_repr.predecessor start_block)) - -let archive_import legacy_chain_state legacy_chain_store chain_store - cycle_length (checkpoint_header, checkpoint_level) current_head_hash = - hash_header legacy_chain_store checkpoint_header >>=? fun checkpoint_hash -> - (if checkpoint_level = 0l then - (* Only the floating store should be imported. Nothing was pruned - yet so we import the metadata.
*) - import_floating - legacy_chain_state - legacy_chain_store - chain_store - ~with_metadata:Required - current_head_hash - (Int32.succ checkpoint_level) - else - make_block_repr ~with_metadata:Required legacy_chain_state checkpoint_hash - >>=? fun checkpoint_block -> - (* First, import cemented blocks [1l;checkpoint] with metadata as - nothing is pruned in archive mode. *) - import_cemented - legacy_chain_state - legacy_chain_store - chain_store - cycle_length - ~with_metadata:Required - ~start_block:checkpoint_block - ~end_limit:1l - >>=? fun () -> - (* Then, import floating blocks [checkpoint;head] with metadata as - nothing is pruned in archive mode. *) - import_floating - legacy_chain_state - legacy_chain_store - chain_store - ~with_metadata:Required - current_head_hash - (Int32.succ checkpoint_level)) - >>=? fun () -> - let new_checkpoint = (checkpoint_hash, checkpoint_level) in - let genesis = Store.Chain.genesis chain_store in - let new_caboose = (genesis.block, 0l) in - let new_savepoint = new_caboose in - return (new_checkpoint, new_savepoint, new_caboose) - -(* As the lmdb store is not compatible with a Full 5, it is upgraded as - a Full 0. It will converge to a Full 5 afterward. *) -let full_import legacy_chain_state legacy_chain_store chain_store cycle_length - (checkpoint, checkpoint_level) current_head_hash = - hash_header legacy_chain_store checkpoint >>=? fun checkpoint_hash -> - (if checkpoint_level = 0l then - (* Only the floating store should be imported. Nothing was pruned - yet so we import the metadata. *) - import_floating - legacy_chain_state - legacy_chain_store - chain_store - ~with_metadata:Optional - current_head_hash - 1l - else - make_block_repr ~with_metadata:Unwanted legacy_chain_state checkpoint_hash - >>=? fun checkpoint_block_repr -> - (* First, import cemented blocks [1l;checkpoint] without metadata - as those blocks were pruned. *) - import_cemented - legacy_chain_state - legacy_chain_store - chain_store - cycle_length - ~with_metadata:Unwanted - ~start_block:checkpoint_block_repr - ~end_limit:1l - >>=? fun () -> - (* Then, import floating blocks [checkpoint;head] with metadata as - those blocks were not pruned yet. *) - import_floating - legacy_chain_state - legacy_chain_store - chain_store - ~with_metadata:Optional - current_head_hash - checkpoint.shell.level - >>=? fun () -> return_unit) - >>=? fun () -> - (Legacy_state.Chain.save_point legacy_chain_state >|= fun (l, h) -> (h, l)) - >>= fun legacy_savepoint -> - let new_checkpoint = legacy_savepoint in - let genesis = Store.Chain.genesis chain_store in - let new_caboose = (genesis.block, 0l) in - let new_savepoint = new_checkpoint in - return (new_checkpoint, new_savepoint, new_caboose) - -(* As the lmdb store is not compatible with a Rolling 5, it is upgraded as - a Rolling 0. It will converge to a Rolling 5 afterward. *) -let rolling_import legacy_chain_state legacy_chain_store chain_store - (checkpoint_header, checkpoint_level) current_head_hash = - hash_header legacy_chain_store checkpoint_header >>=? fun checkpoint_hash -> - Legacy_state.Chain.caboose legacy_chain_state - >>= fun (legacy_caboose_level, _legacy_caboose_hash) -> - (if checkpoint_level = 0l then - (* Only the floating store should be imported. Nothing was pruned - yet so we import the metadata. 
*) - import_floating - legacy_chain_state - legacy_chain_store - chain_store - ~with_metadata:Optional - current_head_hash - 1l - else - (* Importing floating [ lmdb_caboose ; lmdb_checkpoint [ without - metadata as those blocks were pruned. *) - Legacy_state.Block.read legacy_chain_state checkpoint_hash - >>=? fun checkpoint_block -> - let checkpoint_header = checkpoint_block.header in - assert (Block_header.hash checkpoint_header = checkpoint_hash) ; - let checkpoint_pred_hash = checkpoint_header.shell.predecessor in - import_floating - legacy_chain_state - legacy_chain_store - chain_store - ~with_metadata:Unwanted - checkpoint_pred_hash - legacy_caboose_level - >>=? fun () -> - (* Importing blocks [ checkpoint ; current_head ] in floating with - metadata as those blocks were not pruned yet. *) - import_floating - legacy_chain_state - legacy_chain_store - chain_store - ~with_metadata:Optional - current_head_hash - checkpoint_level) - >>=? fun () -> - read_i legacy_chain_state current_head_hash legacy_caboose_level - >>=? fun new_caboose_header -> - (if checkpoint_level = 0l then - let genesis = Store.Chain.genesis chain_store in - return (genesis.block, 0l) - else - hash_header legacy_chain_store new_caboose_header >>=? fun caboose_hash -> - return (caboose_hash, new_caboose_header.shell.level)) - >>=? fun new_caboose -> - (Legacy_state.Chain.save_point legacy_chain_state >|= fun (l, h) -> (h, l)) - >>= fun legacy_savepoint -> - let new_checkpoint = legacy_savepoint in - let new_savepoint = new_checkpoint in - return (new_checkpoint, new_savepoint, new_caboose) - -let import_blocks legacy_chain_state chain_id chain_store cycle_length - checkpoint history_mode = - Legacy_state.Chain.store legacy_chain_state >>= fun legacy_store -> - let legacy_chain_store = Legacy_store.Chain.get legacy_store chain_id in - let legacy_chain_data = Legacy_store.Chain_data.get legacy_chain_store in - Legacy_store.Chain_data.Current_head.read legacy_chain_data - >>=? fun current_head_hash -> - (match (history_mode : History_mode.t) with - | Archive -> - archive_import - legacy_chain_state - legacy_chain_store - chain_store - cycle_length - checkpoint - current_head_hash - | Full _ -> - full_import - legacy_chain_state - legacy_chain_store - chain_store - cycle_length - checkpoint - current_head_hash - | Rolling _ -> - rolling_import - legacy_chain_state - legacy_chain_store - chain_store - checkpoint - current_head_hash) - >>=? fun (new_checkpoint, new_savepoint, new_caboose) -> - return (new_checkpoint, new_savepoint, new_caboose) - -let store_known_protocols legacy_store store = - Legacy_store.Protocol.Contents.bindings legacy_store >>= fun proto_list -> - List.iter_es - (fun (h, p) -> - Store.Protocol.store store h p >>= function - | Some expected_hash -> - fail_unless - (Protocol_hash.equal expected_hash h) - (Failed_to_convert_protocol h) - | None -> fail (Failed_to_convert_protocol h) >>=? fun () -> return_unit) - proto_list - -let import_protocols history_mode legacy_store legacy_chain_state store - _chain_store chain_id = - let legacy_chain_store = Legacy_store.Chain.get legacy_store chain_id in - let legacy_chain_data = Legacy_store.Chain_data.get legacy_chain_store in - match (history_mode : History_mode.t) with - | Archive | Full _ -> store_known_protocols legacy_store store - | Rolling _ -> - Legacy_store.Chain_data.Current_head.read legacy_chain_data - >>=? fun current_head_hash -> - Legacy_state.Block.read legacy_chain_state current_head_hash - >>=? 
fun current_head -> - Legacy_state.Chain.caboose legacy_chain_state - >>= fun (_, legacy_caboose_hash) -> - (* We store the oldest known protocol and we assume that its - transition_header is the caboose as the actual transition - block is unknown. *) - Legacy_state.Block.read legacy_chain_state legacy_caboose_hash - >>=? fun transition_block -> - let transition_header = transition_block.header in - let protocol_level = current_head.header.shell.proto_level in - Legacy_store.Chain.Protocol_info.read legacy_chain_store protocol_level - >>=? fun protocol_info -> - let protocol_hash = fst protocol_info in - let chain_store = Store.main_chain_store store in - let is_genesis = transition_header.shell.level = 0l in - hash_header legacy_chain_store transition_header - >>=? fun transition_hash -> - Legacy_state.Block.last_allowed_fork_level current_head >>=? fun lafl -> - (if lafl > transition_header.shell.level && not is_genesis then - make_block_repr - ~with_metadata:Unwanted - legacy_chain_state - transition_hash - else - make_block_repr - ~with_metadata:Unwanted - legacy_chain_state - transition_hash) - >>=? fun transition_block -> - Store.Unsafe.set_protocol_level - chain_store - ~protocol_level - (Store.Unsafe.block_of_repr transition_block, protocol_hash) - -let import_invalid_blocks legacy_chain_store = - Legacy_store.Block.Invalid_block.fold - legacy_chain_store - ~init:Block_hash.Map.empty - ~f:(fun hash ({level; errors} : Legacy_store.Block.invalid_block) map -> - Lwt.return - (Block_hash.Map.add - hash - ({level; errors} : Store_types.invalid_block) - map)) - -let import_forked_chains legacy_chain_store = - Legacy_store.Forking_block_hash.fold - legacy_chain_store - ~init:Chain_id.Map.empty - ~f:(fun id hash map -> Lwt.return (Chain_id.Map.add id hash map)) - -let update_stored_data legacy_chain_store legacy_store new_store ~new_checkpoint - ~new_savepoint ~new_caboose genesis = - let chain_store = Store.main_chain_store new_store in - let store_dir = Store.directory new_store in - let legacy_chain_data = Legacy_store.Chain_data.get legacy_chain_store in - Legacy_store.Chain_data.Current_head.read legacy_chain_data - >>=? fun legacy_head -> - import_invalid_blocks legacy_chain_store >>= fun invalid_blocks -> - import_forked_chains legacy_store >>= fun forked_chains -> - Store.Unsafe.restore_from_legacy_upgrade - store_dir - ~genesis - ~invalid_blocks - ~forked_chains - >>=? fun () -> - Store.Block.read_block chain_store legacy_head >>=? fun new_head -> - Store.Unsafe.set_head chain_store new_head >>=? fun () -> - Store.Unsafe.set_checkpoint chain_store new_checkpoint >>=? fun () -> - Store.Unsafe.set_savepoint chain_store new_savepoint >>=? fun () -> - let block_store = Store.Unsafe.get_block_store chain_store in - let cemented_store = Block_store.cemented_block_store block_store in - let cementing_highwatermark = - Cemented_block_store.get_highest_cemented_level cemented_store - in - Store.Unsafe.set_cementing_highwatermark chain_store cementing_highwatermark - >>=? fun () -> Store.Unsafe.set_caboose chain_store new_caboose - -(* Returns the inferred checkpoint of the chain or None if the current - head is set to genesis. *) -let infer_checkpoint legacy_chain_state chain_id = - (* When upgrading from a full or rolling node, the checkpoint may - not be set on a "protocol-defined checkpoint". We substitute it - by using, as a checkpoint, the highest block between the - savepoint and the last allowed fork level of the current - head.
*) - Legacy_state.Chain.store legacy_chain_state >>= fun legacy_store -> - let legacy_chain_store = Legacy_store.Chain.get legacy_store chain_id in - let legacy_chain_data = Legacy_store.Chain_data.get legacy_chain_store in - Legacy_store.Chain_data.Current_head.read legacy_chain_data - >>=? fun head_hash -> - Legacy_state.Block.read legacy_chain_state head_hash >>=? fun head_contents -> - if head_contents.header.shell.level = 0l then return_none - else - Legacy_state.Block.last_allowed_fork_level head_contents >>=? fun lafl -> - Legacy_store.Chain_data.Save_point.read legacy_chain_data - >>=? fun (savepoint_level, savepoint_hash) -> - Legacy_state.Block.read legacy_chain_state savepoint_hash - >>=? fun savepoint -> - if Compare.Int32.(lafl > savepoint_level) then - read_i legacy_chain_state head_hash lafl >>=? fun lafl_header -> - return_some (lafl_header, lafl) - else return_some (Legacy_state.Block.header savepoint, savepoint_level) - -let upgrade_cleaner data_dir ~upgraded_store = - Event.(emit restoring_after_failure) data_dir >>= fun () -> - Lwt_utils_unix.remove_dir upgraded_store >>= fun () -> Lwt.return_unit - -let raw_upgrade chain_name ~new_store ~legacy_state history_mode genesis = - let chain_id = Chain_id.of_block_hash genesis.Genesis.block in - Legacy_state.Chain.get legacy_state chain_id >>=? fun legacy_chain_state -> - Legacy_state.Chain.store legacy_chain_state >>= fun legacy_store -> - let legacy_chain_store = Legacy_store.Chain.get legacy_store chain_id in - let cycle_length = Hardcoded.cycle_length ~chain_name in - (infer_checkpoint legacy_chain_state chain_id >>=? function - | None -> - Legacy_state.Block.read legacy_chain_state genesis.block - >>=? fun genesis_block -> - return (genesis_block.header, genesis_block.header.shell.level) - | Some checkpoint -> return checkpoint) - >>=? fun checkpoint -> - let new_chain_store = Store.main_chain_store new_store in - import_protocols - history_mode - legacy_store - legacy_chain_state - new_store - new_chain_store - chain_id - >>=? fun () -> - import_blocks - legacy_chain_state - chain_id - new_chain_store - cycle_length - checkpoint - history_mode - >>=? fun (new_checkpoint, new_savepoint, new_caboose) -> - update_stored_data - legacy_chain_store - legacy_store - new_store - ~new_checkpoint - ~new_savepoint - ~new_caboose - genesis - -let temporary_former_store_path ~data_dir = data_dir // "lmdb_store_to_remove" - -let upgrade_0_0_4 ~data_dir ?patch_context - ~(chain_name : Distributed_db_version.Name.t) genesis = - Hardcoded.check_network ~chain_name >>=? fun () -> - let new_store_tmp = data_dir // "new_store_tmp" in - Lwt.try_bind - (fun () -> - Lwt_unix.file_exists new_store_tmp >>= fun previous_aborted_upgrade -> - (if previous_aborted_upgrade then Lwt_utils_unix.remove_dir new_store_tmp - else Lwt.return_unit) - >>= fun () -> - let store_to_upgrade = data_dir // "store" in - let context_root = data_dir // "context" in - Legacy_state.init - ~readonly:true - ~context_root - ~store_root:store_to_upgrade - genesis - >>=? fun (state, _chain_state, _context_index, legacy_history_mode) -> - let history_mode = History_mode.convert legacy_history_mode in - Event.( - emit - advertise_upgrade_mode - ( Format.asprintf "%a" History_mode.Legacy.pp legacy_history_mode, - Format.asprintf "%a" History_mode.pp history_mode )) - >>= fun () -> - Store.init - ?patch_context - ~store_dir:new_store_tmp - ~context_dir:context_root - ~history_mode - ~allow_testchains:true - genesis - >>=? 
fun store -> - raw_upgrade - chain_name - ~new_store:store - ~legacy_state:state - history_mode - genesis - >>=? fun () -> - Legacy_state.close state >>= fun () -> - Store.close_store store >>= fun () -> - Lwt_unix.rename store_to_upgrade (temporary_former_store_path ~data_dir) - >>= fun () -> - let final_store_path = data_dir // "store" in - Lwt_unix.rename new_store_tmp final_store_path >>= fun () -> return_unit) - (function - | Ok () -> - Event.(emit upgrade_completed (temporary_former_store_path ~data_dir)) - >>= fun () -> return_unit - | Error errors -> - upgrade_cleaner data_dir ~upgraded_store:new_store_tmp >>= fun () -> - Lwt.return (Error errors)) - (fun exn -> - upgrade_cleaner data_dir ~upgraded_store:new_store_tmp >>= fun () -> - fail_with_exn exn) - -let upgrade_0_0_5 ~data_dir genesis = - let floating_stores_to_upgrade = Floating_block_store.[RO; RW; RW_TMP] in - let store_dir = - Naming.store_dir ~dir_path:Filename.Infix.(data_dir // "store") - in - let chain_id = Chain_id.of_block_hash genesis.Genesis.block in - let chain_dir = Naming.chain_dir store_dir chain_id in - (* Remove the potential RO_TMP floating stores *) - (let path = Naming.dir_path (Naming.floating_blocks_dir chain_dir RO_TMP) in - Lwt_unix.file_exists path >>= fun exists -> - if exists then Lwt_utils_unix.remove_dir path else Lwt.return_unit) - >>= fun () -> - (* Move the floating stores to upgrade into a "_broken"-suffixed - directory *) - let broken_floating_blocks_dir floating_blocks_dir = - Naming.dir_path floating_blocks_dir ^ "_broken" - in - List.iter_s - (fun kind -> - let floating_blocks_dir = Naming.floating_blocks_dir chain_dir kind in - let path = Naming.dir_path floating_blocks_dir in - Lwt_unix.file_exists path >>= function - | false -> - (* Nothing to do: should only happen with RW_TMP *) - Lwt.return_unit - | true -> - Lwt_unix.rename - (Naming.dir_path floating_blocks_dir) - (broken_floating_blocks_dir floating_blocks_dir)) - floating_stores_to_upgrade - >>= fun () -> - Stored_data.load (Naming.genesis_block_file chain_dir) - >>=? fun genesis_block_data -> - Stored_data.get genesis_block_data >>= fun genesis_block -> - Block_store.load chain_dir ~genesis_block ~readonly:false - >>=? fun block_store -> - (* Set the merge status as Idle: we are overriding the merge *) - Block_store.write_status block_store Idle >>=? fun () -> - (* Iterate through the blocks and add them to the new floating stores *) - List.iter_es - (fun kind -> - let kind_str = - (function - | Floating_block_store.RO -> "RO" - | RW -> "RW" - | RW_TMP -> "RW_TMP" - | _ -> assert false) - kind - in - let floating_blocks_dir = Naming.floating_blocks_dir chain_dir kind in - Lwt_unix.file_exists (Naming.dir_path floating_blocks_dir) >>= function - | false -> - (* Nothing to do: should only happen with RW_TMP *) - return_unit - | true -> - Animation.display_progress - ~pp_print_step:(fun fmt i -> - Format.fprintf fmt "upgrading %s floating store %d" kind_str i) - (fun notify -> - Lwt_unix.openfile - Filename.Infix.( - broken_floating_blocks_dir floating_blocks_dir // "blocks") - [Unix.O_CREAT; O_CLOEXEC; Unix.O_RDONLY] - 0o444 - >>= fun fd -> - Floating_block_store.iter_s_raw_fd - (fun block -> - notify () >>= fun () -> - Block_store.store_block block_store block) - fd - >>=? fun () -> - Lwt_unix.close fd >>= fun () -> return_unit)) - floating_stores_to_upgrade - >>=?
fun () -> - (* Remove the former broken floating stores *) - List.iter_s - (fun kind -> - let floating_blocks_dir = Naming.floating_blocks_dir chain_dir kind in - Lwt_utils_unix.remove_dir (broken_floating_blocks_dir floating_blocks_dir)) - floating_stores_to_upgrade - >>= fun () -> return_unit diff --git a/src/lib_store/legacy.mli b/src/lib_store/legacy.mli deleted file mode 100644 index ec18880d9360..000000000000 --- a/src/lib_store/legacy.mli +++ /dev/null @@ -1,93 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2020 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -(** Legacy storage upgrade - - The legacy store upgrade aims to migrate a storage using the LMDB - backend (v0.0.4) to the new store representation (v0.0.5). This - migration is available for any store running v0.0.4 with any - history mode. The upgrade procedure retrieves each - block and its associated data available in the old store, converts - and stores them in the new backend. It will preserve all the - information originally contained in the LMDB store such as the - current head, checkpoint, savepoint and caboose. *) - -(** Module for handling values needed by the legacy upgrade. It - exposes the [cycle_length] to allow upgrading [supported_networks] - only. These values are mandatory as the new backend regroups blocks - by cycles, information which is not available in the previous - representation when the metadata of blocks is not fully available. -*) -module Hardcoded : sig - type network = {name : Distributed_db_version.Name.t; cycle_length : int} - - val supported_networks : network list - - val cycle_length : chain_name:Distributed_db_version.Name.t -> int - - val check_network : - chain_name:Distributed_db_version.Name.t -> unit tzresult Lwt.t -end - -type error += Failed_to_convert_protocol of Protocol_hash.t - -type error += Failed_to_upgrade of string - -(** [temporary_former_store_path ~data_dir] returns the path of the - preserved legacy store given a [data_dir]. *) -val temporary_former_store_path : data_dir:string -> string - -(** [raw_upgrade chain_name ~new_store ~old_store hm genesis] is the - low-level upgrade procedure which performs the store upgrade given - the direct paths to the [new_store] and [old_store].
- - {b Warning} This function is unsafe and is exposed for testing - purposes. *) -val raw_upgrade : - Distributed_db_version.Name.t -> - new_store:Store.t -> - legacy_state:Legacy_state.t -> - History_mode.t -> - Genesis.t -> - unit tzresult Lwt.t - -(** [upgrade_0_0_4 ~data_dir ?patch_context ~chain_name genesis] - upgrades a store located in [data_dir] based on v0.0.4 to - v0.0.5. It outputs information regarding the necessary actions in - order to cleanly complete the upgrade. In this case, the user - must delete the old storage. Returns the message to display to the - user. *) -val upgrade_0_0_4 : - data_dir:string -> - ?patch_context:(Context.t -> Context.t tzresult Lwt.t) -> - chain_name:Distributed_db_version.Name.t -> - Genesis.t -> - unit tzresult Lwt.t - -(** [upgrade_0_0_5 ~data_dir genesis] upgrades a v0.0.5 store located - in [data_dir] to the v0.0.6 format. This upgrade consists in - removing the broken floating indexes of the former store and - rewriting fresh ones. *) -val upgrade_0_0_5 : data_dir:string -> Genesis.t -> unit tzresult Lwt.t diff --git a/src/lib_store/legacy_store/.ocamlformat b/src/lib_store/legacy_store/.ocamlformat deleted file mode 100644 index 5e1158919e85..000000000000 --- a/src/lib_store/legacy_store/.ocamlformat +++ /dev/null @@ -1,17 +0,0 @@ -version=0.18.0 -wrap-fun-args=false -let-binding-spacing=compact -field-space=loose -break-separators=after -space-around-arrays=false -space-around-lists=false -space-around-records=false -space-around-variants=false -dock-collection-brackets=true -space-around-records=false -sequence-style=separator -doc-comments=before -margin=80 -module-item-spacing=sparse -parens-tuple=always -parens-tuple-patterns=always diff --git a/src/lib_store/legacy_store/README.md b/src/lib_store/legacy_store/README.md deleted file mode 100644 index 2e2ced99f4b9..000000000000 --- a/src/lib_store/legacy_store/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# tezos-legacy-store -Summary line: Legacy storage library for storing chain data - -## Overview -- `tezos-legacy-store` provides the legacy abstraction of the disk - storage. It aims to handle the on-disk storage of static objects - such as blocks, operations, block metadata, protocols and chain - data. -- This legacy library is meant to be used in migration tests from the legacy store - and **must not be used for new development**.
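
For readers unfamiliar with the removed API: the two upgrade entry points documented in `legacy.mli` above were meant to be chained, first converting the LMDB (v0.0.4) store to v0.0.5, then repairing the v0.0.5 floating stores into the v0.0.6 format. A minimal sketch of such a driver, assuming the usual `>>=?` Lwt-result combinators from `TzPervasives` are in scope (the `upgrade_storage` helper is hypothetical, not code from this repository):

```ocaml
(* Hypothetical driver chaining the removed entry points.
   [data_dir], [chain_name] and [genesis] would come from the node
   configuration. *)
let upgrade_storage ~data_dir ~chain_name genesis =
  (* v0.0.4 (LMDB) -> v0.0.5 conversion. *)
  Legacy.upgrade_0_0_4 ~data_dir ~chain_name genesis >>=? fun () ->
  (* v0.0.5 -> v0.0.6 floating-store repair. *)
  Legacy.upgrade_0_0_5 ~data_dir genesis
```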
diff --git a/src/lib_store/legacy_store/dune b/src/lib_store/legacy_store/dune deleted file mode 100644 index cbf037d46637..000000000000 --- a/src/lib_store/legacy_store/dune +++ /dev/null @@ -1,47 +0,0 @@ -(library - (name tezos_legacy_store) - (public_name tezos-legacy-store) - (instrumentation (backend bisect_ppx)) - (libraries - tezos-base - tezos-shell-services - tezos-stdlib-unix - tezos-protocol-updater - tezos-lmdb - tezos-validation - tezos-context - lwt-watcher) - (flags - (:standard - -open Tezos_shell_services - -open Tezos_base - -open Tezos_base__TzPervasives - -open Tezos_stdlib_unix - -open Tezos_stdlib - -open Tezos_validation - -open Tezos_context - -open Tezos_protocol_updater)) - (modules (:standard \ legacy_store_builder))) - -(executable - (name legacy_store_builder) - (instrumentation (backend bisect_ppx)) - (libraries - tezos-legacy-store - tezos-validation - tezos-protocol-updater - ; embed some protocols for testing purposes - tezos-embedded-protocol-alpha - tezos-embedded-protocol-genesis) - (flags - (:standard - -open Tezos_legacy_store - -open Tezos_shell_services - -open Tezos_base - -open Tezos_base__TzPervasives - -open Tezos_stdlib_unix - -open Tezos_stdlib - -open Tezos_validation - -open Tezos_context - -open Tezos_protocol_updater)) - (modules legacy_store_builder)) diff --git a/src/lib_store/legacy_store/dune-project b/src/lib_store/legacy_store/dune-project deleted file mode 100644 index 9b3e35603e3c..000000000000 --- a/src/lib_store/legacy_store/dune-project +++ /dev/null @@ -1,3 +0,0 @@ -(lang dune 2.9) -(formatting (enabled_for ocaml)) -(name tezos-legacy-store) diff --git a/src/lib_store/legacy_store/legacy_chain.ml b/src/lib_store/legacy_store/legacy_chain.ml deleted file mode 100644 index 293ad45d9ce9..000000000000 --- a/src/lib_store/legacy_store/legacy_chain.ml +++ /dev/null @@ -1,140 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. 
*) -(* *) -(*****************************************************************************) - -module Events = State_events - -let genesis chain_state = - let genesis = Legacy_state.Chain.genesis chain_state in - Legacy_state.Block.read_opt chain_state genesis.block - >|= WithExceptions.Option.get ~loc:__LOC__ - -let known_heads chain_state = - Legacy_state.read_chain_data chain_state (fun chain_store _data -> - Legacy_store.Chain_data.Known_heads.elements chain_store) - >>= fun hashes -> - List.map_p - (fun h -> - Legacy_state.Block.read_opt chain_state h - >|= WithExceptions.Option.get ~loc:__LOC__) - hashes - -let head chain_state = - Legacy_state.read_chain_data chain_state (fun _chain_store data -> - Lwt.return data.current_head) - -let mem chain_state hash = - Legacy_state.read_chain_data chain_state (fun chain_store data -> - if Block_hash.equal (Legacy_state.Block.hash data.current_head) hash then - Lwt.return_true - else Legacy_store.Chain_data.In_main_branch.known (chain_store, hash)) - -type data = Legacy_state.chain_data = { - current_head : Legacy_state.Block.t; - current_mempool : Mempool.t; - live_blocks : Block_hash.Set.t; - live_operations : Operation_hash.Set.t; - test_chain : Chain_id.t option; - save_point : Int32.t * Block_hash.t; - caboose : Int32.t * Block_hash.t; -} - -let data chain_state = - Legacy_state.read_chain_data chain_state (fun _chain_store data -> - Lwt.return data) - -let locator chain_state seed = - data chain_state >>= fun data -> - Legacy_state.compute_locator chain_state data.current_head seed - -let locked_set_head chain_store data block = - let rec pop_blocks ancestor block = - let hash = Legacy_state.Block.hash block in - if Block_hash.equal hash ancestor then Lwt.return_unit - else - Events.(emit pop_block hash) >>= fun () -> - Legacy_store.Chain_data.In_main_branch.remove (chain_store, hash) - >>= fun () -> - Legacy_state.Block.predecessor block >>= function - | Some predecessor -> pop_blocks ancestor predecessor - | None -> assert false - (* Cannot pop the genesis... *) - in - let push_block pred_hash block = - let hash = Legacy_state.Block.hash block in - Events.(emit push_block hash) >>= fun () -> - Legacy_store.Chain_data.In_main_branch.store (chain_store, pred_hash) hash - >>= fun () -> Lwt.return hash - in - Legacy_chain_traversal.new_blocks - ~from_block:data.current_head - ~to_block:block - >>= fun (ancestor, path) -> - let ancestor = Legacy_state.Block.hash ancestor in - pop_blocks ancestor data.current_head >>= fun () -> - List.fold_left_s push_block ancestor path >>= fun _ -> - Legacy_store.Chain_data.Current_head.store - chain_store - (Legacy_state.Block.hash block) - >>= fun () -> - (* TODO more optimized updated of live_{blocks/operations} when the - new head is a direct successor of the current head... - Make sure to do the live blocks computation in `init_head` - when this TODO is resolved. *) - Lwt.return - { - data with - current_head = block; - current_mempool = Mempool.empty; - live_blocks = Block_hash.Set.empty; - live_operations = Operation_hash.Set.empty; - } - -let set_head chain_state block = - (* Legacy_state.Block.max_operations_ttl block - * >>=? fun max_op_ttl -> *) - (* Chain_traversal.live_blocks block max_op_ttl - * >>=? 
fun (live_blocks, live_operations) -> *) - Legacy_state.update_chain_data chain_state (fun chain_store data -> - locked_set_head chain_store data block >>= fun new_chain_data -> - Lwt.return (Some new_chain_data, data.current_head)) - >>= fun chain_state -> return chain_state - -let test_and_set_head chain_state ~old block = - (* Legacy_state.Block.max_operations_ttl block - * >>=? fun max_op_ttl -> - * Chain_traversal.live_blocks block max_op_ttl - * >>=? fun (live_blocks, live_operations) -> *) - Legacy_state.update_chain_data chain_state (fun chain_store data -> - if not (Legacy_state.Block.equal data.current_head old) then - Lwt.return (None, false) - else - locked_set_head chain_store data block - (* live_blocks live_operations *) - >>= fun new_chain_data -> Lwt.return (Some new_chain_data, true)) - >>= fun chain_state -> return chain_state - -let init_head chain_state = - head chain_state >>= fun block -> - set_head chain_state block >>=? fun (_ : Legacy_state.Block.t) -> return_unit diff --git a/src/lib_store/legacy_store/legacy_chain.mli b/src/lib_store/legacy_store/legacy_chain.mli deleted file mode 100644 index 83e44bd2c5c8..000000000000 --- a/src/lib_store/legacy_store/legacy_chain.mli +++ /dev/null @@ -1,76 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -(** Tezos Shell Module - Managing the current head. *) - -(** The genesis block of the chain. On a test chain, - the test protocol has been promoted as "main" protocol. *) -val genesis : Legacy_state.Chain.t -> Legacy_state.Block.t Lwt.t - -(** The current head of the chain. *) -val head : Legacy_state.Chain.t -> Legacy_state.Block.t Lwt.t - -val locator : - Legacy_state.Chain.t -> Block_locator.seed -> Block_locator.t Lwt.t - -(** All the available chain data. *) -type data = { - current_head : Legacy_state.Block.t; - current_mempool : Mempool.t; - live_blocks : Block_hash.Set.t; - live_operations : Operation_hash.Set.t; - test_chain : Chain_id.t option; - save_point : Int32.t * Block_hash.t; - caboose : Int32.t * Block_hash.t; -} - -(** Reading atomically all the chain data. 
*) -val data : Legacy_state.Chain.t -> data Lwt.t - -(** The current head and all the known (valid) alternate heads. *) -val known_heads : Legacy_state.Chain.t -> Legacy_state.Block.t list Lwt.t - -(** Test whether a block belongs to the current mainchain. *) -val mem : Legacy_state.Chain.t -> Block_hash.t -> bool Lwt.t - -(** Record a block as the current head of the chain. - It returns the previous head. *) -val set_head : - Legacy_state.Chain.t -> - Legacy_state.Block.t -> - Legacy_state.Block.t tzresult Lwt.t - -(** Atomically change the current head of the chain. - This returns [true] whenever the change succeeded, or [false] - when the current head is not equal to the [old] argument. *) -val test_and_set_head : - Legacy_state.Chain.t -> - old:Legacy_state.Block.t -> - Legacy_state.Block.t -> - bool tzresult Lwt.t - -(** Restores the data about the current head at startup - (recomputes the sets of live blocks and operations). *) -val init_head : Legacy_state.Chain.t -> unit tzresult Lwt.t diff --git a/src/lib_store/legacy_store/legacy_chain_traversal.ml b/src/lib_store/legacy_store/legacy_chain_traversal.ml deleted file mode 100644 index 65cbccd44d33..000000000000 --- a/src/lib_store/legacy_store/legacy_chain_traversal.ml +++ /dev/null @@ -1,170 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. 
*) -(* *) -(*****************************************************************************) - -open Legacy_state - -let path (b1 : Block.t) (b2 : Block.t) = - if not (Chain_id.equal (Block.chain_id b1) (Block.chain_id b2)) then - invalid_arg "Chain_traversal.path" ; - let rec loop acc current = - if Block.equal b1 current then Lwt.return_some acc - else - Block.predecessor current >>= function - | Some pred -> loop (current :: acc) pred - | None -> Lwt.return_none - in - loop [] b2 - -let common_ancestor (b1 : Block.t) (b2 : Block.t) = - if not (Chain_id.equal (Block.chain_id b1) (Block.chain_id b2)) then - invalid_arg "Chain_traversal.path" ; - let rec loop (b1 : Block.t) (b2 : Block.t) = - if Block.equal b1 b2 then Lwt.return b1 - else if Time.Protocol.(Block.timestamp b1 <= Block.timestamp b2) then - Block.predecessor b2 >>= function - | None -> assert false - | Some b2 -> loop b1 b2 - else - Block.predecessor b1 >>= function - | None -> assert false - | Some b1 -> loop b1 b2 - in - loop b1 b2 - -let iter_predecessors ?max ?min_fitness ?min_date heads ~f = - let module Local = struct - exception Exit - end in - let compare b1 b2 = - match Fitness.compare (Block.fitness b1) (Block.fitness b2) with - | 0 -> ( - match - Time.Protocol.compare (Block.timestamp b1) (Block.timestamp b2) - with - | 0 -> Block.compare b1 b2 - | res -> res) - | res -> res - in - let (pop, push) = - (* Poor-man priority queue *) - let queue : Block.t list ref = ref [] in - let pop () = - match !queue with - | [] -> None - | b :: bs -> - queue := bs ; - Some b - in - let push b = - let rec loop = function - | [] -> [b] - | b' :: bs' as bs -> - let cmp = compare b b' in - if cmp = 0 then bs else if cmp < 0 then b' :: loop bs' else b :: bs - in - queue := loop !queue - in - (pop, push) - in - let check_count = - match max with - | None -> fun () -> () - | Some max -> - let cpt = ref 0 in - fun () -> - if !cpt >= max then raise Local.Exit ; - incr cpt - in - let check_fitness = - match min_fitness with - | None -> fun _ -> true - | Some min_fitness -> - fun b -> Fitness.compare min_fitness (Block.fitness b) <= 0 - in - let check_date = - match min_date with - | None -> fun _ -> true - | Some min_date -> fun b -> Time.Protocol.(min_date <= Block.timestamp b) - in - let rec loop () = - match pop () with - | None -> Lwt.return_unit - | Some b -> ( - check_count () ; - f b >>= fun () -> - Block.predecessor b >>= function - | None -> loop () - | Some p -> - if check_fitness p && check_date p then push p ; - loop ()) - in - List.iter push heads ; - try loop () with Local.Exit -> Lwt.return_unit - -let iter_predecessors ?max ?min_fitness ?min_date heads ~f = - match heads with - | [] -> Lwt.return_unit - | b :: _ -> - let chain_id = Block.chain_id b in - if - not - (List.for_all - (fun b -> Chain_id.equal chain_id (Block.chain_id b)) - heads) - then invalid_arg "Legacy_state.Helpers.iter_predecessors" ; - iter_predecessors ?max ?min_fitness ?min_date heads ~f - -let new_blocks ~from_block ~to_block = - common_ancestor from_block to_block >>= fun ancestor -> - path ancestor to_block >>= function - | None -> assert false - | Some path -> Lwt.return (ancestor, path) - -let live_blocks block n = - let rec loop bacc oacc chain_state block_head n = - Block.all_operation_hashes block_head >>= fun hashes -> - let oacc = - List.fold_left - (List.fold_left (fun oacc op -> Operation_hash.Set.add op oacc)) - oacc - hashes - in - let bacc = Block_hash.Set.add (Block.hash block_head) bacc in - if n = 0 then return (bacc, oacc) - else - 
Legacy_state.Block.predecessor block_head >>= function
- | None ->
- let genesis_hash = (Legacy_state.Chain.genesis chain_state).block in
- let block_hash = Block.hash block_head in
- if Block_hash.equal genesis_hash block_hash then return (bacc, oacc)
- else fail (Legacy_state.Block_not_found block_hash)
- | Some predecessor -> loop bacc oacc chain_state predecessor (pred n)
- in
- loop
- Block_hash.Set.empty
- Operation_hash.Set.empty
- (Block.chain_state block)
- block
- n
diff --git a/src/lib_store/legacy_store/legacy_chain_traversal.mli b/src/lib_store/legacy_store/legacy_chain_traversal.mli
deleted file mode 100644
index b52b2cac9a93..000000000000
--- a/src/lib_store/legacy_store/legacy_chain_traversal.mli
+++ /dev/null
@@ -1,69 +0,0 @@
-(*****************************************************************************)
-(* *)
-(* Open Source License *)
-(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *)
-(* *)
-(* Permission is hereby granted, free of charge, to any person obtaining a *)
-(* copy of this software and associated documentation files (the "Software"),*)
-(* to deal in the Software without restriction, including without limitation *)
-(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *)
-(* and/or sell copies of the Software, and to permit persons to whom the *)
-(* Software is furnished to do so, subject to the following conditions: *)
-(* *)
-(* The above copyright notice and this permission notice shall be included *)
-(* in all copies or substantial portions of the Software. *)
-(* *)
-(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*)
-(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *)
-(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *)
-(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*)
-(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *)
-(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *)
-(* DEALINGS IN THE SOFTWARE. *)
-(* *)
-(*****************************************************************************)
-
-(** Tezos Shell Module - Chain Traversal API *)
-
-open Legacy_state
-
-(** If [b1] is an ancestor of [b2] in the current state,
- then [path b1 b2] returns the chain of blocks from
- [b1] (excluded) to [b2] (included). Returns [None] otherwise. *)
-val path : Block.t -> Block.t -> Block.t list option Lwt.t
-
-(** [common_ancestor b1 b2] returns the first common ancestor
- in the history of blocks [b1] and [b2]. *)
-val common_ancestor : Block.t -> Block.t -> Block.t Lwt.t
-
-(** [iter_predecessors blocks ~f] iterates [f] over [blocks] and
- their recursive predecessors. Blocks are visited with a
- decreasing fitness (then decreasing timestamp). If the optional
- argument [max] is provided, the iteration stops after [max]
- visited blocks. If [min_fitness] is provided, blocks with a
- fitness lower than [min_fitness] are ignored. If [min_date] is
- provided, blocks with a timestamp older than [min_date] are ignored. *)
-val iter_predecessors :
- ?max:int ->
- ?min_fitness:Fitness.t ->
- ?min_date:Time.Protocol.t ->
- Block.t list ->
- f:(Block.t -> unit Lwt.t) ->
- unit Lwt.t
-
-(** [new_blocks ~from_block ~to_block] returns a pair [(ancestor,
- path)], where [ancestor] is the common ancestor of [from_block]
- and [to_block] and where [path] is the chain from [ancestor]
- (excluded) to [to_block] (included).
The function raises an
- exception when the two provided blocks do not belong to the same
- chain. *)
-val new_blocks :
- from_block:Block.t -> to_block:Block.t -> (Block.t * Block.t list) Lwt.t
-
-(** [live_blocks b n] returns a pair [(blocks, operations)] where
- [blocks] is the set of [n] blocks made of [b] and its [n-1]
- predecessors, and where [operations] is the set of operations
- included in those blocks.
-*)
-val live_blocks :
- Block.t -> int -> (Block_hash.Set.t * Operation_hash.Set.t) tzresult Lwt.t
diff --git a/src/lib_store/legacy_store/legacy_snapshots.ml b/src/lib_store/legacy_store/legacy_snapshots.ml
deleted file mode 100644
index 1a8f28a7e3df..000000000000
--- a/src/lib_store/legacy_store/legacy_snapshots.ml
+++ /dev/null
@@ -1,1010 +0,0 @@
-(*****************************************************************************)
-(* *)
-(* Open Source License *)
-(* Copyright (c) 2019 Dynamic Ledger Solutions, Inc. *)
-(* Copyright (c) 2019 Nomadic Labs. *)
-(* Copyright (c) 2020 Metastate AG *)
-(* *)
-(* Permission is hereby granted, free of charge, to any person obtaining a *)
-(* copy of this software and associated documentation files (the "Software"),*)
-(* to deal in the Software without restriction, including without limitation *)
-(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *)
-(* and/or sell copies of the Software, and to permit persons to whom the *)
-(* Software is furnished to do so, subject to the following conditions: *)
-(* *)
-(* The above copyright notice and this permission notice shall be included *)
-(* in all copies or substantial portions of the Software. *)
-(* *)
-(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*)
-(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *)
-(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *)
-(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*)
-(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *)
-(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *)
-(* DEALINGS IN THE SOFTWARE. *)
-(* *)
-(*****************************************************************************)
-
-type status =
- | Export_unspecified_hash of Block_hash.t
- | Export_info of History_mode.Legacy.t * Block_hash.t * Int32.t
- | Export_success of string
- | Set_history_mode of History_mode.Legacy.t
- | Import_info of string
- | Import_unspecified_hash
- | Import_loading
- | Set_head of Block_hash.t
- | Import_success of string
- | Reconstruct_start_default
- | Reconstruct_end_default of Block_hash.t
- | Reconstruct_enum
- | Reconstruct_success
-
-let status_pp ppf = function
- | Export_unspecified_hash h ->
- Format.fprintf
- ppf
- "There is no block hash specified with the `--block` option.
Using %a \ - (last checkpoint)" - Block_hash.pp - h - | Export_info (hm, h, l) -> - Format.fprintf - ppf - "Exporting a snapshot in %a mode, targeting block hash %a at level %a" - History_mode.Legacy.pp - hm - Block_hash.pp - h - Format.pp_print_int - (Int32.to_int l) - | Export_success filename -> - Format.fprintf ppf "@[Successful export: %s@]" filename - | Set_history_mode hm -> - Format.fprintf ppf "Setting history-mode to %a" History_mode.Legacy.pp hm - | Import_info filename -> - Format.fprintf ppf "Importing data from snapshot file %s" filename - | Import_unspecified_hash -> - Format.fprintf - ppf - "You may consider using the --block argument to verify \ - that the block imported is the one you expected" - | Import_loading -> - Format.fprintf - ppf - "Retrieving and validating data. This can take a while, please bear \ - with us" - | Set_head h -> - Format.fprintf ppf "Setting current head to block %a" Block_hash.pp h - | Import_success filename -> - Format.fprintf ppf "@[Successful import from file %s@]" filename - | Reconstruct_start_default -> - Format.fprintf ppf "Starting reconstruct from genesis" - | Reconstruct_end_default h -> - Format.fprintf - ppf - "Starting reconstruct toward the predecessor of the current head (%a)" - Block_hash.pp - h - | Reconstruct_enum -> - Format.fprintf ppf "Enumerating all blocks to reconstruct" - | Reconstruct_success -> - Format.fprintf ppf "The storage was successfully reconstructed." - -module Definition = struct - let section = None - - let name = "snapshot" - - type t = status Time.System.stamped - - let encoding = - let open Data_encoding in - Time.System.stamped_encoding - @@ union - [ - case - (Tag 0) - ~title:"Export_unspecified_hash" - Block_hash.encoding - (function Export_unspecified_hash h -> Some h | _ -> None) - (fun h -> Export_unspecified_hash h); - case - (Tag 1) - ~title:"Export_info" - (obj3 - (req "history_mode" History_mode.Legacy.encoding) - (req "block_hash" Block_hash.encoding) - (req "level" int32)) - (function Export_info (hm, h, l) -> Some (hm, h, l) | _ -> None) - (fun (hm, h, l) -> Export_info (hm, h, l)); - case - (Tag 2) - ~title:"Export_success" - string - (function Export_success s -> Some s | _ -> None) - (fun s -> Export_success s); - case - (Tag 3) - ~title:"Set_history_mode" - History_mode.Legacy.encoding - (function Set_history_mode hm -> Some hm | _ -> None) - (fun hm -> Set_history_mode hm); - case - (Tag 4) - ~title:"Import_info" - string - (function Import_info s -> Some s | _ -> None) - (fun s -> Import_info s); - case - (Tag 5) - ~title:"Import_unspecified_hash" - empty - (function Import_unspecified_hash -> Some () | _ -> None) - (fun () -> Import_unspecified_hash); - case - (Tag 6) - ~title:"Import_loading" - empty - (function Import_loading -> Some () | _ -> None) - (fun () -> Import_loading); - case - (Tag 7) - ~title:"Set_head" - Block_hash.encoding - (function Set_head h -> Some h | _ -> None) - (fun h -> Set_head h); - case - (Tag 8) - ~title:"Import_success" - string - (function Import_success s -> Some s | _ -> None) - (fun s -> Import_success s); - case - (Tag 9) - ~title:"Reconstruct_start_default" - empty - (function Reconstruct_start_default -> Some () | _ -> None) - (fun () -> Reconstruct_start_default); - case - (Tag 10) - ~title:"Reconstruct_end_default" - Block_hash.encoding - (function Reconstruct_end_default h -> Some h | _ -> None) - (fun h -> Reconstruct_end_default h); - case - (Tag 11) - ~title:"Reconstruct_enum" - empty - (function Reconstruct_enum -> Some () | _ -> None) - 
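(* Illustration, not from the removed sources: every case of the union above
   pairs a tag and a payload encoding with a projection (match the
   constructor, return [Some payload]) and an injection (rebuild the
   constructor from the payload). A minimal sketch of the same idiom,
   assuming only [Data_encoding]:

     type msg = Ping | Text of string

     let msg_encoding =
       let open Data_encoding in
       union
         [
           case
             (Tag 0)
             ~title:"Ping"
             empty
             (function Ping -> Some () | _ -> None)
             (fun () -> Ping);
           case
             (Tag 1)
             ~title:"Text"
             string
             (function Text s -> Some s | _ -> None)
             (fun s -> Text s);
         ]
*)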
(fun () -> Reconstruct_enum); - case - (Tag 12) - ~title:"Reconstruct_success" - empty - (function Reconstruct_success -> Some () | _ -> None) - (fun () -> Reconstruct_success); - ] - - let pp ~short:_ ppf (status : t) = - Format.fprintf ppf "%a" status_pp status.data - - let doc = "Snapshots status." - - let level (status : t) = - match status.data with - | Export_unspecified_hash _ | Export_info _ | Export_success _ - | Set_history_mode _ | Import_info _ | Import_unspecified_hash - | Import_loading | Set_head _ | Import_success _ | Reconstruct_start_default - | Reconstruct_end_default _ | Reconstruct_enum | Reconstruct_success -> - Internal_event.Notice -end - -module Event_snapshot = Internal_event.Make (Definition) - -let lwt_emit (status : status) = - let time = Systime_os.now () in - Event_snapshot.emit - ~section:(Internal_event.Section.make_sanitized [Definition.name]) - (fun () -> Time.System.stamp ~time status) - >>= function - | Ok () -> Lwt.return_unit - | Error el -> - Format.kasprintf Lwt.fail_with "Snapshot_event.emit: %a" pp_print_trace el - -type error += - | Wrong_snapshot_export of History_mode.Legacy.t * History_mode.Legacy.t - -type wrong_block_export_kind = - | Pruned of Block_hash.t - | Too_few_predecessors of Block_hash.t - | Unknown_block of string - -let pp_wrong_block_export_kind ppf = function - | Pruned h -> - Format.fprintf ppf "block %a because it is pruned" Block_hash.pp h - | Too_few_predecessors h -> - Format.fprintf - ppf - "block %a because it does not have enough predecessors" - Block_hash.pp - h - | Unknown_block str -> - Format.fprintf ppf "block %s because it cannot be found" str - -let wrong_block_export_kind_encoding = - let open Data_encoding in - union - [ - case - (Tag 0) - ~title:"pruned" - Block_hash.encoding - (function Pruned h -> Some h | _ -> None) - (fun h -> Pruned h); - case - (Tag 1) - ~title:"too_few_predecessors" - Block_hash.encoding - (function Too_few_predecessors h -> Some h | _ -> None) - (fun h -> Too_few_predecessors h); - case - (Tag 2) - ~title:"unknown_hash" - string - (function Unknown_block s -> Some s | _ -> None) - (fun s -> Unknown_block s); - ] - -type error += Wrong_block_export of wrong_block_export_kind - -type error += Inconsistent_imported_block of Block_hash.t * Block_hash.t - -type error += Snapshot_import_failure of string - -type error += Wrong_protocol_hash of Protocol_hash.t - -type error += - | Inconsistent_operation_hashes of - (Operation_list_list_hash.t * Operation_list_list_hash.t) - -type error += Inconsistent_operation_hashes_lengths - -type error += Invalid_block_specification of string - -let () = - let open Data_encoding in - register_error_kind - `Permanent - ~id:"WrongSnapshotExport" - ~title:"Wrong snapshot export" - ~description: - "Snapshot exports is not compatible with the current configuration." - ~pp:(fun ppf (src, dst) -> - Format.fprintf - ppf - "Cannot export a %a snapshot from a %a node." - History_mode.Legacy.pp - dst - History_mode.Legacy.pp - src) - (obj2 - (req "src" History_mode.Legacy.encoding) - (req "dst" History_mode.Legacy.encoding)) - (function Wrong_snapshot_export (src, dst) -> Some (src, dst) | _ -> None) - (fun (src, dst) -> Wrong_snapshot_export (src, dst)) ; - register_error_kind - `Permanent - ~id:"WrongBlockExport" - ~title:"Wrong block export" - ~description:"The block to export in the snapshot is not valid." - ~pp:(fun ppf kind -> - Format.fprintf - ppf - "Fails to export snapshot using the %a." 
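(* Illustration, not from the removed sources: the registrations in this
   [let () = ...] block all follow the same shape. A minimal sketch of the
   idiom with a hypothetical error:

     type error += Legacy_example of string

     let () =
       register_error_kind
         `Permanent
         ~id:"legacy.example"
         ~title:"Example error"
         ~description:"Illustrative error registration."
         ~pp:(fun ppf s -> Format.fprintf ppf "Example failure: %s." s)
         Data_encoding.(obj1 (req "message" string))
         (function Legacy_example s -> Some s | _ -> None)
         (fun s -> Legacy_example s)
*)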
- pp_wrong_block_export_kind - kind) - (obj1 (req "wrong_block_export" wrong_block_export_kind_encoding)) - (function Wrong_block_export kind -> Some kind | _ -> None) - (fun kind -> Wrong_block_export kind) ; - register_error_kind - `Permanent - ~id:"InconsistentImportedBlock" - ~title:"Inconsistent imported block" - ~description:"The imported block is not the expected one." - ~pp:(fun ppf (got, exp) -> - Format.fprintf - ppf - "The block contained in the file is %a instead of %a." - Block_hash.pp - got - Block_hash.pp - exp) - (obj2 - (req "block_hash" Block_hash.encoding) - (req "block_hash_expected" Block_hash.encoding)) - (function - | Inconsistent_imported_block (got, exp) -> Some (got, exp) | _ -> None) - (fun (got, exp) -> Inconsistent_imported_block (got, exp)) ; - register_error_kind - `Permanent - ~id:"SnapshotImportFailure" - ~title:"Snapshot import failure" - ~description:"The imported snapshot is malformed." - ~pp:(fun ppf msg -> - Format.fprintf - ppf - "The data contained in the snapshot is not valid. The import mechanism \ - failed to validate the file: %s." - msg) - (obj1 (req "message" string)) - (function Snapshot_import_failure str -> Some str | _ -> None) - (fun str -> Snapshot_import_failure str) ; - register_error_kind - `Permanent - ~id:"WrongProtocolHash" - ~title:"Wrong protocol hash" - ~description:"Wrong protocol hash" - ~pp:(fun ppf p -> - Format.fprintf - ppf - "Wrong protocol hash (%a) found in snapshot. Snapshot is corrupted." - Protocol_hash.pp - p) - (obj1 (req "protocol_hash" Protocol_hash.encoding)) - (function Wrong_protocol_hash p -> Some p | _ -> None) - (fun p -> Wrong_protocol_hash p) ; - register_error_kind - `Permanent - ~id:"InconsistentOperationHashes" - ~title:"Inconsistent operation hashes" - ~description:"The operations given do not match their hashes." - ~pp:(fun ppf (oph, oph') -> - Format.fprintf - ppf - "Inconsistent operation hashes. Expected: %a, got: %a." - Operation_list_list_hash.pp - oph - Operation_list_list_hash.pp - oph') - (obj2 - (req "expected_operation_hashes" Operation_list_list_hash.encoding) - (req "received_operation_hashes" Operation_list_list_hash.encoding)) - (function - | Inconsistent_operation_hashes (oph, oph') -> Some (oph, oph') - | _ -> None) - (fun (oph, oph') -> Inconsistent_operation_hashes (oph, oph')) ; - register_error_kind - `Permanent - ~id:"InconsistentOperationHashesLengths" - ~title:"Inconsistent operation hashes lengths" - ~description:"Different number of operations and hashes given." - ~pp:(fun ppf () -> - Format.pp_print_string ppf "Inconsistent operation hashes lengths") - unit - (function Inconsistent_operation_hashes_lengths -> Some () | _ -> None) - (fun () -> Inconsistent_operation_hashes_lengths) ; - register_error_kind - `Permanent - ~id:"InvalidBlockSpecification" - ~title:"Invalid block specification" - ~description:"Invalid specification of block to import" - ~pp:(fun ppf str -> - Format.fprintf - ppf - "Cannot check the given block to import based on %s. You must specify \ - a valid block hash." - str) - (obj1 (req "str" string)) - (function Invalid_block_specification s -> Some s | _ -> None) - (fun s -> Invalid_block_specification s) - -let compute_export_limit block_store chain_data_store block_header - export_rolling = - let block_hash = Block_header.hash block_header in - (Legacy_store.Block.Contents.read_opt (block_store, block_hash) >>= function - | Some contents -> return contents - | None -> fail (Wrong_block_export (Pruned block_hash))) - >>=? 
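(* Illustration, not from the removed sources: a worked instance of the limit
   computed below, assuming a hypothetical head at level 100_000 with
   max_operations_ttl = 120:

     full export:    limit = max 1l caboose_level
     rolling export: limit = max 1l Int32.(sub 100_000l (of_int 120))
                           = 99_880l

   i.e. a rolling snapshot keeps full block contents only for the last
   [max_operations_ttl] blocks below the exported block. *)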
fun {max_operations_ttl; _} -> - if not export_rolling then - Legacy_store.Chain_data.Caboose.read chain_data_store - >>=? fun (caboose_level, _) -> return (max 1l caboose_level) - else - let limit = - Int32.( - sub block_header.Block_header.shell.level (of_int max_operations_ttl)) - in - (* (\* fails when the limit exceeds the genesis or the genesis is - * included in the export limit *\) - * fail_when - * (limit <= 0l) - * (Wrong_block_export (Too_few_predecessors block_hash)) - * >>=? fun () -> *) - return (max 1l limit) - -(** When called with a block, returns its predecessor if it exists and - its protocol_data if the block is a transition block (i.e. protocol - level changing block) or when there is no more predecessor. *) -let pruned_block_iterator index block_store limit header = - if header.Block_header.shell.level <= limit then - Context.legacy_get_protocol_data_from_header index header - >>= fun protocol_data -> return (None, Some protocol_data) - else - let pred_hash = header.Block_header.shell.predecessor in - Legacy_state.Block.Header.read (block_store, pred_hash) - >>=? fun pred_header -> - Legacy_store.Block.Operations.bindings (block_store, pred_hash) - >>= fun pred_operations -> - Legacy_store.Block.Operation_hashes.bindings (block_store, pred_hash) - >>= fun pred_operation_hashes -> - let pruned_block = - { - Context.Pruned_block_legacy.block_header = pred_header; - operations = pred_operations; - operation_hashes = pred_operation_hashes; - } - in - let header_proto_level = header.Block_header.shell.proto_level in - let pred_header_proto_level = pred_header.Block_header.shell.proto_level in - if header_proto_level <> pred_header_proto_level then - Context.legacy_get_protocol_data_from_header index header - >>= fun proto_data -> return (Some pruned_block, Some proto_data) - else return (Some pruned_block, None) - -let parse_block_arg = function - | None -> return_none - | Some str -> ( - match Block_services.parse_block str with - | Ok v -> return_some v - | Error err -> failwith "Invalid value for `--block`: %s" err) - -let export ?(export_rolling = false) ~context_root ~store_root ~genesis filename - block_hash = - Legacy_state.init ~context_root ~store_root genesis ~readonly:true - >>=? fun (state, _chain_state, context_index, history_mode) -> - Legacy_store.init store_root >>=? fun store -> - let chain_id = Chain_id.of_block_hash genesis.block in - let chain_store = Legacy_store.Chain.get store chain_id in - let chain_data_store = Legacy_store.Chain_data.get chain_store in - let block_store = Legacy_store.Block.get chain_store in - (match history_mode with - | Archive | Full -> return_unit - | Rolling as history_mode -> - if export_rolling then return_unit - else fail (Wrong_snapshot_export (history_mode, History_mode.Legacy.Full))) - >>=? fun () -> - (Legacy_state.Block.Header.read_opt (block_store, block_hash) >>= function - | None -> - fail - (Wrong_block_export (Unknown_block (Block_hash.to_b58check block_hash))) - | Some block_header -> - let export_mode = - if export_rolling then History_mode.Legacy.Rolling else Full - in - lwt_emit - (Export_info (export_mode, block_hash, block_header.shell.level)) - >>= fun () -> - (* Get block predecessor's block header *) - Legacy_store.Block.Predecessors.read (block_store, block_hash) 0 - >>=? fun pred_block_hash -> - Legacy_state.Block.Header.read (block_store, pred_block_hash) - >>=? 
fun pred_block_header -> - (* Get operation list *) - let validations_passes = block_header.shell.validation_passes in - List.map_es - (fun i -> - Legacy_store.Block.Operations.read (block_store, block_hash) i) - (0 -- (validations_passes - 1)) - >>=? fun operations -> - Legacy_store.Block.Block_metadata_hash.read_opt - (block_store, pred_block_hash) - >>= fun predecessor_block_metadata_hash -> - (if pred_block_header.shell.validation_passes = 0 then return_none - else - Legacy_store.Block.Operations_metadata_hashes.known - (block_store, pred_block_hash) - 0 - >>= function - | false -> return_none - | true -> - List.map_es - (fun i -> - Legacy_store.Block.Operations_metadata_hashes.read - (block_store, pred_block_hash) - i) - (0 -- (pred_block_header.shell.validation_passes - 1)) - >|=? Option.some) - >>=? fun predecessor_ops_metadata_hashes -> - compute_export_limit - block_store - chain_data_store - block_header - export_rolling - >>=? fun export_limit -> - let iterator = - pruned_block_iterator context_index block_store export_limit - in - let block_data = {Context.Block_data_legacy.block_header; operations} in - return - ( pred_block_header, - block_data, - predecessor_block_metadata_hash, - predecessor_ops_metadata_hashes, - export_mode, - iterator )) - >>=? fun data_to_dump -> - Context.legacy_dump_snapshot context_index data_to_dump ~filename - >>=? fun () -> - lwt_emit (Export_success filename) >>= fun () -> - Legacy_store.close store ; - Legacy_state.close state >>= fun () -> return_unit - -open Filename.Infix - -let context_dir data_dir = data_dir // "context" - -let store_dir data_dir = data_dir // "store" - -let check_operations_consistency block_header operations operation_hashes = - (* Compute operations hashes and compare *) - (List.iter2_e - ~when_different_lengths:Inconsistent_operation_hashes_lengths - (fun (_, op) (_, oph) -> - let expected_op_hash = List.map Operation.hash op in - List.iter2 - ~when_different_lengths:Inconsistent_operation_hashes_lengths - (fun expected found -> assert (Operation_hash.equal expected found)) - expected_op_hash - oph) - operations - operation_hashes - |> function - | Ok _ as ok -> ok - | Error err -> error err) - (* To make a trace *) - >>? 
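(* Illustration, not from the removed sources: the check below recomputes the
   Merkle root committed to by the header. A sketch of the same computation,
   assuming [ops : Operation.t list list] with one list per validation pass:

     let expected_root ops =
       Operation_list_list_hash.compute
         (List.map
            (fun pass ->
              Operation_list_hash.compute (List.map Operation.hash pass))
            ops)

   The block is consistent iff [expected_root ops] equals
   [block_header.shell.operations_hash]. *)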
fun () ->
- (* Check header hashes based on Merkle tree *)
- let hashes =
- List.rev_map (fun (_, opl) -> List.map Operation.hash opl) operations
- in
- let computed_hash =
- Operation_list_list_hash.compute
- (List.map Operation_list_hash.compute hashes)
- in
- let are_oph_equal =
- Operation_list_list_hash.equal
- computed_hash
- block_header.Block_header.shell.operations_hash
- in
- error_unless
- are_oph_equal
- (Inconsistent_operation_hashes
- (computed_hash, block_header.Block_header.shell.operations_hash))
-
-let check_operations_consistency block_header operations operation_hashes =
- Lwt.return
- @@ check_operations_consistency block_header operations operation_hashes
-
-let compute_predecessors ~genesis_hash oldest_level block_hashes i =
- let rec step s d acc =
- if oldest_level = 1l && i - d = -1 then List.rev ((s, genesis_hash) :: acc)
- else if i - d < 0 then List.rev acc
- else step (s + 1) (d * 2) ((s, block_hashes.(i - d)) :: acc)
- in
- step 0 1 []
-
-let check_context_hash_consistency block_validation_result block_header =
- fail_unless
- (Context_hash.equal
- block_validation_result.Tezos_validation.Block_validation.context_hash
- block_header.Block_header.shell.context)
- (Snapshot_import_failure "resulting context hash does not match")
-
-let set_history_mode store history_mode =
- lwt_emit (Set_history_mode history_mode) >>= fun () ->
- Legacy_store.Configuration.History_mode.store store history_mode >>= fun () ->
- return_unit
-
-(* WARNING - In legacy snapshots there is no test chain fork
-*)
-let store_new_head chain_state chain_data ~genesis block_header operations
- block_validation_result =
- let ({
- validation_store;
- block_metadata;
- ops_metadata;
- block_metadata_hash;
- ops_metadata_hashes;
- }
- : Tezos_validation.Block_validation.result) =
- block_validation_result
- in
- Legacy_state.Block.store
- chain_state
- block_header
- block_metadata
- operations
- ops_metadata
- block_metadata_hash
- ops_metadata_hashes
- ~forking_testchain:false
- validation_store
- >>=?
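(* Illustration, not from the removed sources: a worked trace of
   [compute_predecessors] defined earlier in this file, with hypothetical
   values oldest_level = 5l and i = 6. The steps (s, d) visit (0, 1), (1, 2)
   and (2, 4), then stop since i - 8 < 0, yielding

     [(0, block_hashes.(5)); (1, block_hashes.(4)); (2, block_hashes.(2))]

   i.e. slot [s] holds the predecessor at distance 2^s, matching the
   exponential predecessor table described in legacy_state.ml. *)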
fun new_head -> - match new_head with - | None -> - (* Should not happen as the data-dir must be empty *) - fail - (Snapshot_import_failure - "a chain head is already registered in the store") - | Some new_head -> - (* New head is set*) - Legacy_store.Chain_data.Known_heads.remove chain_data genesis - >>= fun () -> - Legacy_store.Chain_data.Known_heads.store - chain_data - (Legacy_state.Block.hash new_head) - >>= fun () -> - Legacy_store.Chain_data.Current_head.store - chain_data - (Legacy_state.Block.hash new_head) - >>= fun () -> return_unit - -let update_checkpoint chain_state checkpoint_header = - let block_hash = Block_header.hash checkpoint_header in - (* Imported block is set as the current checkpoint/save_point … *) - let new_checkpoint = - (checkpoint_header.Block_header.shell.level, block_hash) - in - Legacy_state.Chain.set_checkpoint chain_state checkpoint_header >>= fun () -> - Lwt.return new_checkpoint - -let update_savepoint chain_state new_savepoint = - Legacy_state.update_chain_data chain_state (fun store data -> - let new_data = {data with save_point = new_savepoint} in - Legacy_store.Chain_data.Save_point.store store new_savepoint >>= fun () -> - Lwt.return (Some new_data, ())) - -let update_caboose chain_data ~genesis block_header oldest_header max_op_ttl = - let oldest_level = oldest_header.Block_header.shell.level in - let caboose_level = if oldest_level = 1l then 0l else oldest_level in - let caboose_hash = - if oldest_level = 1l then genesis else Block_header.hash oldest_header - in - let minimal_caboose_level = - Int32.(sub block_header.Block_header.shell.level (of_int max_op_ttl)) - in - fail_unless - Compare.Int32.(caboose_level <= minimal_caboose_level) - (Snapshot_import_failure - (Format.sprintf "caboose level (%ld) is not valid" caboose_level)) - >>=? fun () -> - Legacy_store.Chain_data.Caboose.store chain_data (caboose_level, caboose_hash) - >>= fun () -> return_unit - -let import_protocol_data index store block_hash_arr level_oldest_block - new_header (level, protocol_data) = - (* Retrieve the original context hash of the block. *) - let delta = Int32.(to_int (sub level level_oldest_block)) in - (if delta = Array.length block_hash_arr then Lwt.return new_header - else - let pruned_block_hash = block_hash_arr.(delta) in - let block_store = Legacy_store.Block.get store in - Legacy_state.Block.Header.read_opt (block_store, pruned_block_hash) - >>= function - | None -> assert false - | Some block_header -> Lwt.return block_header) - >>= fun block_header -> - let expected_context_hash = block_header.Block_header.shell.context in - (* Retrieve the input info. *) - let info = protocol_data.Context.Protocol_data_legacy.info in - let test_chain = protocol_data.test_chain_status in - let data_hash = protocol_data.data_key in - let parents = protocol_data.parents in - let protocol_hash = protocol_data.protocol_hash in - let predecessor_block_metadata_hash = - protocol_data.predecessor_block_metadata_hash - in - let predecessor_ops_metadata_hash = - protocol_data.predecessor_ops_metadata_hash - in - (* Validate the context hash consistency, and so the protocol data. 
*) - Context.validate_context_hash_consistency_and_commit - ~author:info.author - ~timestamp:info.timestamp - ~message:info.message - ~data_hash - ~parents - ~expected_context_hash - ~test_chain - ~protocol_hash - ~predecessor_block_metadata_hash - ~predecessor_ops_metadata_hash - ~index - >>= function - | true -> - let protocol_level = block_header.shell.proto_level in - let block_level = block_header.shell.level in - Legacy_store.Chain.Protocol_info.store - store - protocol_level - (protocol_hash, block_level) - >>= fun () -> return_unit - | false -> fail (Wrong_protocol_hash protocol_hash) - -let import_protocol_data_list index store block_hash_arr level_oldest_block - new_head protocol_data = - let rec aux = function - | [] -> return_unit - | (level, protocol_data) :: xs -> - import_protocol_data - index - store - block_hash_arr - level_oldest_block - new_head - (level, protocol_data) - >>=? fun () -> aux xs - in - aux protocol_data - -let verify_predecessors header_opt pred_hash = - match header_opt with - | None -> return_unit - | Some header -> - fail_unless - (header.Block_header.shell.level >= 2l - && Block_hash.equal header.shell.predecessor pred_hash) - (Snapshot_import_failure "predecessors inconsistency") - -let verify_oldest_header oldest_header genesis_hash = - let oldest_level = oldest_header.Block_header.shell.level in - fail_unless - (oldest_level >= 1l - || Compare.Int32.(oldest_level = 1l) - && Block_hash.equal - oldest_header.Block_header.shell.predecessor - genesis_hash) - (Snapshot_import_failure "oldest level inconsistency") - -let block_validation succ_header_opt header_hash - {Context.Pruned_block_legacy.block_header; operations; operation_hashes} = - verify_predecessors succ_header_opt header_hash >>=? fun () -> - check_operations_consistency block_header operations operation_hashes - >>=? fun () -> return_unit - -let import ?patch_context ~data_dir ~user_activated_upgrades - ~user_activated_protocol_overrides ~dir_cleaner ~genesis filename ~block = - lwt_emit (Import_info filename) >>= fun () -> - (match block with - | None -> lwt_emit Import_unspecified_hash - | Some _ -> Lwt.return_unit) - >>= fun () -> - lwt_emit Import_loading >>= fun () -> - let context_root = context_dir data_dir in - let store_root = store_dir data_dir in - let chain_id = Chain_id.of_block_hash genesis.Genesis.block in - Legacy_state.init ~context_root ~store_root genesis ?patch_context - >>=? fun (state, chain_state, context_index, _history_mode) -> - Legacy_store.init store_root >>=? 
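(* The import below proceeds in phases, as written in the remainder of this
   function: restore contexts and pruned blocks from the snapshot file,
   rebuild the exponential predecessor table, re-apply the target block and
   check its context hash, import protocol data, store the new head, then
   update checkpoint, save point and caboose. *)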
fun store -> - let chain_store = Legacy_store.Chain.get store chain_id in - let chain_data = Legacy_store.Chain_data.get chain_store in - let block_store = Legacy_store.Block.get chain_store in - Lwt.try_bind - (fun () -> - let k_store_pruned_blocks data = - Legacy_store.with_atomic_rw store (fun () -> - List.iter_s - (fun (pruned_header_hash, pruned_block) -> - Legacy_store.Block.Pruned_contents.store - (block_store, pruned_header_hash) - { - header = - pruned_block.Context.Pruned_block_legacy.block_header; - } - >>= fun () -> - List.iter_s - (fun (i, v) -> - Legacy_store.Block.Operations.store - (block_store, pruned_header_hash) - i - v) - pruned_block.operations - >>= fun () -> - List.iter_s - (fun (i, v) -> - Legacy_store.Block.Operation_hashes.store - (block_store, pruned_header_hash) - i - v) - pruned_block.operation_hashes) - data) - >>= fun () -> return_unit - in - (* Restore context and fetch data *) - Context.legacy_restore_contexts - context_index - ~filename - k_store_pruned_blocks - block_validation - >>=? fun ( predecessor_block_header, - meta, - predecessor_block_metadata_hash, - predecessor_ops_metadata_hashes, - history_mode, - oldest_header_opt, - rev_block_hashes, - protocol_data ) -> - let oldest_header = - WithExceptions.Option.get ~loc:__LOC__ oldest_header_opt - in - let block_hashes_arr = Array.of_list rev_block_hashes in - let write_predecessors_table to_write = - Legacy_store.with_atomic_rw store (fun () -> - List.iter_s - (fun (current_hash, predecessors_list) -> - List.iter_s - (fun (l, h) -> - Legacy_store.Block.Predecessors.store - (block_store, current_hash) - l - h) - predecessors_list - >>= fun () -> - match predecessors_list with - | (0, pred_hash) :: _ -> - Legacy_store.Chain_data.In_main_branch.store - (chain_data, pred_hash) - current_hash - | [] -> Lwt.return_unit - | _ :: _ -> assert false) - to_write) - in - List.fold_left_s - (fun (cpt, to_write) current_hash -> - Tezos_stdlib_unix.Utils.display_progress - ~refresh_rate:(cpt, 1_000) - (fun f -> - f "Computing predecessors table %dK elements%!" (cpt / 1_000)) ; - (if (cpt + 1) mod 5_000 = 0 then - write_predecessors_table to_write >>= fun () -> Lwt.return_nil - else Lwt.return to_write) - >>= fun to_write -> - let predecessors_list = - compute_predecessors - ~genesis_hash:genesis.block - oldest_header.shell.level - block_hashes_arr - cpt - in - Lwt.return (cpt + 1, (current_hash, predecessors_list) :: to_write)) - (0, []) - rev_block_hashes - >>= fun (_, to_write) -> - write_predecessors_table to_write >>= fun () -> - Tezos_stdlib_unix.Utils.display_progress_end () ; - (* Process data imported from snapshot *) - let {Context.Block_data_legacy.block_header; operations} = meta in - let block_hash = Block_header.hash block_header in - (* Checks that the block hash imported by the snapshot is the expected one *) - (parse_block_arg block >>=? function - | Some str -> ( - match str with - | `Hash (bh, _) -> - fail_unless - (Block_hash.equal bh block_hash) - (Inconsistent_imported_block (bh, block_hash)) - | _ -> - fail (Invalid_block_specification (Block_services.to_string str)) - ) - | None -> return_unit) - >>=? 
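(* Illustration, not from the removed sources: the fold above batches the
   predecessor-table writes, flushing them every 5_000 entries under
   [with_atomic_rw] so an interrupted import cannot leave a half-written
   chunk. A minimal sketch of the same batching idiom, assuming a [flush]
   that persists a batch atomically:

     let rec write_batched flush batch n = function
       | [] -> flush batch
       | x :: xs ->
           if n >= 5_000 then
             flush batch >>= fun () -> write_batched flush [x] 1 xs
           else write_batched flush (x :: batch) (n + 1) xs
*)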
fun () -> - lwt_emit (Set_head (Block_header.hash block_header)) >>= fun () -> - let pred_context_hash = predecessor_block_header.shell.context in - Context.checkout_exn context_index pred_context_hash - >>= fun predecessor_context -> - let max_operations_ttl = - Int32.to_int predecessor_block_header.shell.level - in - let predecessor_ops_metadata_hash = - Option.map - (fun hashes -> - List.map Operation_metadata_list_hash.compute hashes - |> Operation_metadata_list_list_hash.compute) - predecessor_ops_metadata_hashes - in - Option.iter_s - (Legacy_store.Block.Block_metadata_hash.store - (block_store, Block_header.hash predecessor_block_header)) - predecessor_block_metadata_hash - >>= fun () -> - Option.iter_s - (Lwt_list.iteri_p (fun i hashes -> - Legacy_store.Block.Operations_metadata_hashes.store - (block_store, Block_header.hash predecessor_block_header) - i - hashes)) - predecessor_ops_metadata_hashes - >>= fun () -> - let env = - { - Block_validation.max_operations_ttl; - chain_id; - predecessor_block_header; - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - predecessor_context; - user_activated_upgrades; - user_activated_protocol_overrides; - } - in - (* ... we can now call apply ... *) - Tezos_validation.Block_validation.apply - env - block_header - operations - ~cache:`Lazy - >>=? fun {result = block_validation_result; _} -> - check_context_hash_consistency - block_validation_result.validation_store - block_header - >>=? fun () -> - verify_oldest_header oldest_header genesis.block >>=? fun () -> - set_history_mode store history_mode >>=? fun () -> - (* ... and we import protocol data...*) - import_protocol_data_list - context_index - chain_store - block_hashes_arr - oldest_header.Block_header.shell.level - block_header - protocol_data - >>=? fun () -> - (* Everything is ok. We can store the new head *) - store_new_head - chain_state - chain_data - ~genesis:genesis.block - block_header - operations - block_validation_result - >>=? fun () -> - (* Update history mode flags *) - update_checkpoint chain_state block_header >>= fun new_checkpoint -> - update_savepoint chain_state new_checkpoint >>= fun () -> - update_caboose - chain_data - ~genesis:genesis.block - block_header - oldest_header - block_validation_result.validation_store.max_operations_ttl - >>=? fun () -> - (* WARNING - In legacy snapshot, there is no storage reconstruction - *) - (* ( match reconstruct with - * | true -> - * if history_mode = History_mode.Legacy.Full then - * reconstruct_storage - * store - * context_index - * chain_id - * ~user_activated_upgrades - * ~user_activated_protocol_overrides - * block_store - * chain_state - * chain_store - * rev_block_hashes - * >>=? fun () -> - * lwt_emit Reconstruct_success >>= fun () -> return_unit - * else fail (Cannot_reconstruct history_mode) - * | false -> - * return_unit ) - * >>=? 
fun () -> *) - Legacy_store.close store ; - Legacy_state.close state >>= fun () -> return_unit) - (function - | Ok () -> lwt_emit (Import_success filename) >>= fun () -> return_unit - | Error errors -> - dir_cleaner data_dir >>= fun () -> Lwt.return (Error errors)) - (fun exn -> dir_cleaner data_dir >>= fun () -> Lwt.fail exn) diff --git a/src/lib_store/legacy_store/legacy_snapshots.mli b/src/lib_store/legacy_store/legacy_snapshots.mli deleted file mode 100644 index 68bf6c0f7d5b..000000000000 --- a/src/lib_store/legacy_store/legacy_snapshots.mli +++ /dev/null @@ -1,51 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2019 Dynamic Ledger Solutions, Inc. *) -(* Copyright (c) 2019 Nomadic Labs. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -type error += Inconsistent_operation_hashes_lengths - -type error += - | Inconsistent_operation_hashes of - (Operation_list_list_hash.t * Operation_list_list_hash.t) - -val export : - ?export_rolling:bool -> - context_root:string -> - store_root:string -> - genesis:Genesis.t -> - string -> - Block_hash.t -> - unit tzresult Lwt.t - -val import : - ?patch_context:(Context.t -> Context.t tzresult Lwt.t) -> - data_dir:string -> - user_activated_upgrades:User_activated.upgrades -> - user_activated_protocol_overrides:User_activated.protocol_overrides -> - dir_cleaner:(string -> unit Lwt.t) -> - genesis:Genesis.t -> - string -> - block:string option -> - unit tzresult Lwt.t diff --git a/src/lib_store/legacy_store/legacy_state.ml b/src/lib_store/legacy_store/legacy_state.ml deleted file mode 100644 index 0b8ec698a7e2..000000000000 --- a/src/lib_store/legacy_store/legacy_state.ml +++ /dev/null @@ -1,1892 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. 
*) -(* Copyright (c) 2020 Metastate AG *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -[@@@ocaml.warning "-30"] - -module Events = State_events -open Validation_errors - -module Shared = struct - type 'a t = {data : 'a; lock : Lwt_mutex.t} - - let create data = {data; lock = Lwt_mutex.create ()} - - let use {data; lock} f = Lwt_mutex.with_lock lock (fun () -> f data) -end - -type global_state = { - global_data : global_data Shared.t; - protocol_store : Legacy_store.Protocol.store Shared.t; - main_chain : Chain_id.t; - protocol_watcher : Protocol_hash.t Lwt_watcher.input; - block_watcher : block Lwt_watcher.input; -} - -and global_data = { - chains : chain_state Chain_id.Table.t; - global_store : Legacy_store.t; - context_index : Context.index; -} - -and chain_state = { - (* never take the lock on 'block_store' when holding - the lock on 'chain_data'. 
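(* Illustration, not from the removed sources: a usage sketch of the [Shared]
   wrapper above, with a hypothetical counter:

     let counter = Shared.create (ref 0)

     let bump () = Shared.use counter (fun r -> incr r ; Lwt.return !r)

   [Shared.use] funnels every access through the mutex, which is why the
   lock-ordering rule stated here matters: taking two [Shared.t] locks in
   inconsistent orders could deadlock. *)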
*) - global_state : global_state; - chain_id : Chain_id.t; - genesis : Genesis.t; - faked_genesis_hash : Block_hash.t; - expiration : Time.Protocol.t option; - allow_forked_chain : bool; - block_store : Legacy_store.Block.store Shared.t; - context_index : Context.index Shared.t; - block_watcher : block Lwt_watcher.input; - chain_data : chain_data_state Shared.t; - block_rpc_directories : - block RPC_directory.t Protocol_hash.Map.t Protocol_hash.Table.t; - header_rpc_directories : - (chain_state * Block_hash.t * Block_header.t) RPC_directory.t - Protocol_hash.Map.t - Protocol_hash.Table.t; -} - -and chain_data_state = { - mutable data : chain_data; - mutable checkpoint : Block_header.t; - chain_data_store : Legacy_store.Chain_data.store; -} - -and chain_data = { - current_head : block; - current_mempool : Mempool.t; - live_blocks : Block_hash.Set.t; - live_operations : Operation_hash.Set.t; - test_chain : Chain_id.t option; - save_point : Int32.t * Block_hash.t; - caboose : Int32.t * Block_hash.t; -} - -and block = { - chain_state : chain_state; - hash : Block_hash.t; - header : Block_header.t; -} - -(* Errors *) - -type error += Block_not_found of Block_hash.t - -type error += Block_contents_not_found of Block_hash.t - -let () = - register_error_kind - `Permanent - ~id:"state.block.not_found" - ~title:"Block_not_found" - ~description:"Block not found" - ~pp:(fun ppf block_hash -> - Format.fprintf ppf "@[Cannot find block %a@]" Block_hash.pp block_hash) - Data_encoding.(obj1 (req "block_not_found" @@ Block_hash.encoding)) - (function Block_not_found block_hash -> Some block_hash | _ -> None) - (fun block_hash -> Block_not_found block_hash) ; - register_error_kind - `Permanent - ~id:"state.block.contents_not_found" - ~title:"Block_contents_not_found" - ~description:"Block not found" - ~pp:(fun ppf block_hash -> - Format.fprintf - ppf - "@[Cannot find block contents %a@]" - Block_hash.pp - block_hash) - Data_encoding.(obj1 (req "block_contents_not_found" @@ Block_hash.encoding)) - (function - | Block_contents_not_found block_hash -> Some block_hash | _ -> None) - (fun block_hash -> Block_contents_not_found block_hash) - -(* Abstract view over block header storage. - This module aims to abstract over block header's [read], [read_opt] and [known] - functions by calling the adequate function depending on the block being pruned or not. -*) - -module Header = struct - let read (store, hash) = - Legacy_store.Block.Contents.read (store, hash) >>= function - | Ok {header; _} -> return header - | Error _ -> - Legacy_store.Block.Pruned_contents.read (store, hash) - >>=? fun {header} -> return header - - let read_opt (store, hash) = - read (store, hash) >>= function - | Ok header -> Lwt.return_some header - | Error _ -> Lwt.return_none - - let known (store, hash) = - Legacy_store.Block.Pruned_contents.known (store, hash) >>= function - | true -> Lwt.return_true - | false -> Legacy_store.Block.Contents.known (store, hash) -end - -let read_chain_data {chain_data; _} f = - Shared.use chain_data (fun state -> f state.chain_data_store state.data) - -let update_chain_data {chain_data; _} f = - Shared.use chain_data (fun state -> - f state.chain_data_store state.data >>= fun (data, res) -> - Option.iter (fun data -> state.data <- data) data ; - Lwt.return res) - -(** The number of predecessors stored per block. - This value chosen to compute efficiently block locators that - can cover a chain of 2 months, at 1 block/min, which is ~86K - blocks at the cost in space of ~72MB. 
- |locator| = log2(|chain|/10) -1 -*) -let stored_predecessors_size = 12 - -(** - Takes a block and populates its predecessors store, under the - assumption that all its predecessors have their store already - populated. The precedecessors are distributed along the chain, up - to the genesis, at a distance from [b] that grows exponentially. - The store tabulates a function [p] from distances to block_ids such - that if [p(b,d)=b'] then [b'] is at distance 2^d from [b]. - Example of how previous predecessors are used: - p(n,0) = n-1 - p(n,1) = n-2 = p(n-1,0) - p(n,2) = n-4 = p(n-2,1) - p(n,3) = n-8 = p(n-4,2) - p(n,4) = n-16 = p(n-8,3) -*) -let store_predecessors (store : Legacy_store.Block.store) (b : Block_hash.t) : - unit Lwt.t = - let rec loop pred dist = - if dist = stored_predecessors_size then Lwt.return_unit - else - Legacy_store.Block.Predecessors.read_opt (store, pred) (dist - 1) - >>= function - | None -> Lwt.return_unit (* we reached the last known block *) - | Some p -> - Legacy_store.Block.Predecessors.store (store, b) dist p >>= fun () -> - loop p (dist + 1) - in - (* the first predecessor is fetched from the header *) - Header.read_opt (store, b) >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun header -> - let pred = header.shell.predecessor in - if Block_hash.equal b pred then Lwt.return_unit (* genesis *) - else - Legacy_store.Block.Predecessors.store (store, b) 0 pred >>= fun () -> - loop pred 1 - -(** - [predecessor_n_raw s b d] returns the hash of the block at distance [d] from [b]. - Returns [None] if [d] is greater than the distance of [b] from genesis or - if [b] is genesis. - Works in O(log|chain|) if the chain is shorter than 2^[stored_predecessors_size] - and in O(|chain|) after that. - @raise Invalid_argument "State.predecessors: negative distance" -*) -let predecessor_n_raw store block_hash distance = - (* helper functions *) - (* computes power of 2 w/o floats *) - let power_of_2 n = - if n < 0 then invalid_arg "negative argument" - else - let rec loop cnt res = - if cnt < 1 then res else loop (cnt - 1) (res * 2) - in - loop n 1 - in - (* computes the closest power of two smaller than a given - a number and the rest w/o floats *) - let closest_power_two_and_rest n = - if n < 0 then invalid_arg "negative argument" - else - let rec loop cnt n rest = - if n <= 1 then (cnt, rest) - else loop (cnt + 1) (n / 2) (rest + (power_of_2 cnt * (n mod 2))) - in - loop 0 n 0 - in - (* actual predecessor function *) - if distance < 0 then - invalid_arg ("State.predecessor: distance < 0 " ^ string_of_int distance) - else if distance = 0 then Lwt.return_some block_hash - else - let rec loop block_hash distance = - if distance = 1 then - Legacy_store.Block.Predecessors.read_opt (store, block_hash) 0 - else - let (power, rest) = closest_power_two_and_rest distance in - let (power, rest) = - if power < stored_predecessors_size then (power, rest) - else - let power = stored_predecessors_size - 1 in - let rest = distance - power_of_2 power in - (power, rest) - in - Legacy_store.Block.Predecessors.read_opt (store, block_hash) power - >>= function - | None -> Lwt.return_none (* reached genesis *) - | Some pred -> - if rest = 0 then Lwt.return_some pred - (* landed on the requested predecessor *) - else loop pred rest - (* need to jump further back *) - in - loop block_hash distance - -let predecessor_n block_store block_hash distance = - Option.catch_os (fun () -> - predecessor_n_raw block_store block_hash distance >>= function - | None -> Lwt.return_none - | Some predecessor -> ( 
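(* Illustration, not from the removed sources: a worked instance of the
   decomposition used by [predecessor_n_raw] above. Asking for the
   predecessor at distance 21 gives closest_power_two_and_rest 21 = (4, 5),
   so the loop follows the stored slot-4 link (a jump of 2^4 = 16 blocks)
   and recurses on the remaining distance 5, itself split into a slot-2 jump
   and a slot-0 read. Each step at least halves the remaining distance,
   which is the O(log |chain|) bound claimed in the comment above. *)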
- Header.known (block_store, predecessor) >>= function
- | false -> Lwt.return_none
- | true -> Lwt.return_some predecessor))
-
-type t = global_state
-
-module Locked_block = struct
- let store_genesis store genesis context =
- let shell : Block_header.shell_header =
- let open Genesis in
- {
- level = 0l;
- proto_level = 0;
- predecessor = genesis.block;
- (* genesis' predecessor is genesis *)
- timestamp = genesis.time;
- fitness = [];
- validation_passes = 0;
- operations_hash = Operation_list_list_hash.empty;
- context;
- }
- in
- let header : Block_header.t = {shell; protocol_data = Bytes.create 0} in
- Legacy_store.Block.Contents.store
- (store, genesis.block)
- {
- header;
- Legacy_store.Block.message = Some "Genesis";
- max_operations_ttl = 0;
- context;
- metadata = Bytes.create 0;
- last_allowed_fork_level = 0l;
- }
- >>= fun () -> Lwt.return header
-
- (* Checks whether a block is compatible with the current checkpoint. *)
- let acceptable chain_data (header : Block_header.t) =
- let checkpoint_level = chain_data.checkpoint.shell.level in
- if checkpoint_level < header.shell.level then
- (* the predecessor is assumed compatible. *)
- Lwt.return_true
- else if checkpoint_level = header.shell.level then
- Lwt.return (Block_header.equal header chain_data.checkpoint)
- else
- (* header.shell.level < checkpoint_level *)
- (* valid only if the current head is lower than the checkpoint. *)
- let head_level = chain_data.data.current_head.header.shell.level in
- Lwt.return (head_level < checkpoint_level)
-
- (* Is a block still valid for a given checkpoint? *)
- let is_valid_for_checkpoint block_store hash (header : Block_header.t)
- (checkpoint : Block_header.t) =
- if Compare.Int32.(header.shell.level < checkpoint.shell.level) then
- Lwt.return_true
- else
- predecessor_n
- block_store
- hash
- (Int32.to_int @@ Int32.sub header.shell.level checkpoint.shell.level)
- >|= WithExceptions.Option.get ~loc:__LOC__
- >>= fun predecessor ->
- if Block_hash.equal predecessor (Block_header.hash checkpoint) then
- Lwt.return_true
- else Lwt.return_false
-end
-
-(* Find the branches that are still valid with a given checkpoint, i.e.
- heads with a lower level, or branches that go through the checkpoint. *)
-let locked_valid_heads_for_checkpoint block_store data checkpoint =
- Legacy_store.Chain_data.Known_heads.read_all data.chain_data_store
- >>= fun heads ->
- Block_hash.Set.fold_s
- (fun head (valid_heads, invalid_heads) ->
- Header.read_opt (block_store, head)
- >|= WithExceptions.Option.get ~loc:__LOC__
- >>= fun header ->
- Locked_block.is_valid_for_checkpoint block_store head header checkpoint
- >>= fun valid ->
- if valid then Lwt.return ((head, header) :: valid_heads, invalid_heads)
- else Lwt.return (valid_heads, (head, header) :: invalid_heads))
- heads
- ([], [])
-
-(* Tag as invalid all blocks in [heads] and their predecessors whose
- level is strictly higher than [level].
*) -let tag_invalid_heads block_store chain_store heads level = - let rec tag_invalid_head (hash, header) = - if header.Block_header.shell.level <= level then - Legacy_store.Chain_data.Known_heads.store chain_store hash >>= fun () -> - Lwt.return_some (hash, header) - else - let errors = [Validation_errors.Checkpoint_error (hash, None)] in - Legacy_store.Block.Invalid_block.store - block_store - hash - {level = header.shell.level; errors} - >>= fun () -> - Legacy_store.Block.Contents.remove (block_store, hash) >>= fun () -> - Legacy_store.Block.Operation_hashes.remove_all (block_store, hash) - >>= fun () -> - Legacy_store.Block.Operations_metadata.remove_all (block_store, hash) - >>= fun () -> - Legacy_store.Block.Operations.remove_all (block_store, hash) >>= fun () -> - Legacy_store.Block.Predecessors.remove_all (block_store, hash) - >>= fun () -> - Header.read_opt (block_store, header.shell.predecessor) >>= function - | None -> Lwt.return_none - | Some header -> tag_invalid_head (Block_header.hash header, header) - in - List.iter_p - (fun (hash, _header) -> - Legacy_store.Chain_data.Known_heads.remove chain_store hash) - heads - >>= fun () -> List.filter_map_s tag_invalid_head heads - -let prune_block store block_hash = - let st = (store, block_hash) in - Legacy_store.Block.Contents.remove st >>= fun () -> - Legacy_store.Block.Invalid_block.remove store block_hash >>= fun () -> - Legacy_store.Block.Operations_metadata.remove_all st - -let store_header_and_prune_block store block_hash = - let st = (store, block_hash) in - (Legacy_store.Block.Contents.read_opt st >>= function - | Some {header; _} -> Legacy_store.Block.Pruned_contents.store st {header} - | None -> ( - Legacy_store.Block.Pruned_contents.known st >>= function - | true -> Lwt.return_unit - | false -> Events.(emit missing_pruned_contents) block_hash)) - >>= fun () -> prune_block store block_hash - -let delete_block store block_hash = - prune_block store block_hash >>= fun () -> - let st = (store, block_hash) in - Legacy_store.Block.Pruned_contents.remove st >>= fun () -> - Legacy_store.Block.Operations.remove_all st >>= fun () -> - Legacy_store.Block.Operation_hashes.remove_all st >>= fun () -> - Legacy_store.Block.Predecessors.remove_all st - -(* Remove all blocks that are not in the chain. 
*) -let cut_alternate_heads block_store chain_store heads = - let rec cut_alternate_head hash header = - Legacy_store.Chain_data.In_main_branch.known (chain_store, hash) - >>= fun in_chain -> - if in_chain then Lwt.return_unit - else - Header.read_opt (block_store, header.Block_header.shell.predecessor) - >>= function - | None -> delete_block block_store hash >>= fun () -> Lwt.return_unit - | Some header -> - delete_block block_store hash >>= fun () -> - cut_alternate_head (Block_header.hash header) header - in - List.iter_p - (fun (hash, header) -> - Legacy_store.Chain_data.Known_heads.remove chain_store hash >>= fun () -> - cut_alternate_head hash header) - heads - -module Chain = struct - type t = chain_state - - type chain_state = t - - let main {main_chain; _} = main_chain - - let test chain_state = - read_chain_data chain_state (fun _ chain_data -> - Lwt.return chain_data.test_chain) - - let all_indexed_protocols chain_state = - let chain_id = chain_state.chain_id in - let global_state = chain_state.global_state in - Shared.use global_state.global_data (fun global_data -> - let global_store = global_data.global_store in - let chain_store = Legacy_store.Chain.get global_store chain_id in - Legacy_store.Chain.Protocol_info.bindings chain_store) - - let get_level_indexed_protocol chain_state header = - let chain_id = chain_state.chain_id in - let protocol_level = header.Block_header.shell.proto_level in - let global_state = chain_state.global_state in - Shared.use global_state.global_data (fun global_data -> - let global_store = global_data.global_store in - let chain_store = Legacy_store.Chain.get global_store chain_id in - Legacy_store.Chain.Protocol_info.read_opt chain_store protocol_level - >>= function - | None -> Stdlib.failwith "State.Chain.get_level_index_protocol" - | Some (p, _) -> Lwt.return p) - - let update_level_indexed_protocol_store chain_state chain_id protocol_level - protocol_hash block_header = - let global_state = chain_state.global_state in - Shared.use chain_state.block_store (fun block_store -> - Header.read_opt - (block_store, block_header.Block_header.shell.predecessor) - >>= function - | None -> Lwt.return_none (* should not happen *) - | Some header -> Lwt.return_some header) - >>= function - | None -> Lwt.return_unit - | Some pred_header -> - if pred_header.shell.proto_level <> block_header.shell.proto_level then - Shared.use global_state.global_data (fun global_data -> - let global_store = global_data.global_store in - let chain_store = Legacy_store.Chain.get global_store chain_id in - Legacy_store.Chain.Protocol_info.store - chain_store - protocol_level - (protocol_hash, block_header.shell.level)) - else Lwt.return_unit - - let allocate ~genesis ~faked_genesis_hash ~save_point ~caboose ~expiration - ~allow_forked_chain ~current_head ~checkpoint ~chain_id global_state - context_index chain_data_store block_store = - Header.read_opt (block_store, current_head) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun current_block_head -> - let rec chain_data = - { - data = - { - save_point; - caboose; - current_head = - {chain_state; hash = current_head; header = current_block_head}; - current_mempool = Mempool.empty; - live_blocks = Block_hash.Set.singleton genesis.Genesis.block; - live_operations = Operation_hash.Set.empty; - test_chain = None; - }; - checkpoint; - chain_data_store; - } - and chain_state = - { - global_state; - chain_id; - chain_data = {Shared.data = chain_data; lock = Lwt_mutex.create ()}; - genesis; - faked_genesis_hash; - expiration; - 
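(* Illustration, not from the removed sources: [chain_data] and [chain_state]
   above are tied with [let rec ... and ...] because the head block stored in
   [chain_data] must point back to the [chain_state] that owns it. A minimal
   sketch of the same knot, with hypothetical types:

     type node = {state : state; id : string}
     and state = {head : node}

     let make id =
       let rec head = {state; id}
       and state = {head} in
       state
*)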
allow_forked_chain; - block_store = Shared.create block_store; - context_index = Shared.create context_index; - block_watcher = Lwt_watcher.create_input (); - block_rpc_directories = Protocol_hash.Table.create 7; - header_rpc_directories = Protocol_hash.Table.create 7; - } - in - Lwt.return chain_state - - let locked_create global_state data ?expiration ?(allow_forked_chain = false) - chain_id genesis (genesis_header : Block_header.t) = - let open Genesis in - let chain_store = Legacy_store.Chain.get data.global_store chain_id in - let block_store = Legacy_store.Block.get chain_store - and chain_data_store = Legacy_store.Chain_data.get chain_store in - let save_point = (genesis_header.shell.level, genesis.block) in - let caboose = (genesis_header.shell.level, genesis.block) in - let proto_level = genesis_header.shell.proto_level in - Legacy_store.Chain.Genesis_hash.store chain_store genesis.block - >>= fun () -> - Legacy_store.Chain.Genesis_time.store chain_store genesis.time >>= fun () -> - Legacy_store.Chain.Genesis_protocol.store chain_store genesis.protocol - >>= fun () -> - Legacy_store.Chain_data.Current_head.store chain_data_store genesis.block - >>= fun () -> - Legacy_store.Chain_data.Known_heads.store chain_data_store genesis.block - >>= fun () -> - Legacy_store.Chain_data.Checkpoint.store chain_data_store genesis_header - >>= fun () -> - Legacy_store.Chain_data.Save_point.store chain_data_store save_point - >>= fun () -> - Legacy_store.Chain_data.Caboose.store chain_data_store caboose >>= fun () -> - Legacy_store.Chain.Protocol_info.store - chain_store - proto_level - (genesis.protocol, genesis_header.shell.level) - >>= fun () -> - (match expiration with - | None -> Lwt.return_unit - | Some time -> Legacy_store.Chain.Expiration.store chain_store time) - >>= fun () -> - (if allow_forked_chain then - Legacy_store.Chain.Allow_forked_chain.store data.global_store chain_id - else Lwt.return_unit) - >>= fun () -> - allocate - ~genesis - ~faked_genesis_hash:(Block_header.hash genesis_header) - ~current_head:genesis.block - ~expiration - ~allow_forked_chain - ~checkpoint:genesis_header - ~chain_id - ~save_point - ~caboose - global_state - data.context_index - chain_data_store - block_store - >>= fun chain -> - Chain_id.Table.add data.chains chain_id chain ; - Lwt.return chain - - let create state ?allow_forked_chain ~commit_genesis genesis chain_id = - Shared.use state.global_data (fun data -> - let chain_store = Legacy_store.Chain.get data.global_store chain_id in - let block_store = Legacy_store.Block.get chain_store in - if Chain_id.Table.mem data.chains chain_id then - Stdlib.failwith "State.Chain.create" - else - commit_genesis ~chain_id >>=? fun commit -> - Locked_block.store_genesis block_store genesis commit - >>= fun genesis_header -> - locked_create - state - data - ?allow_forked_chain - chain_id - genesis - genesis_header - >>= fun chain -> - (* in case this is a forked chain creation, - delete its header from the temporary table*) - Legacy_store.Forking_block_hash.remove - data.global_store - (Context.compute_testchain_chain_id genesis.block) - >>= fun () -> return chain) - - let locked_read global_state data chain_id = - let chain_store = Legacy_store.Chain.get data.global_store chain_id in - let block_store = Legacy_store.Block.get chain_store - and chain_data_store = Legacy_store.Chain_data.get chain_store in - Legacy_store.Chain.Genesis_hash.read chain_store >>=? fun genesis_hash -> - Legacy_store.Chain.Genesis_time.read chain_store >>=? 
fun time -> - Legacy_store.Chain.Genesis_protocol.read chain_store >>=? fun protocol -> - Legacy_store.Chain.Expiration.read_opt chain_store >>= fun expiration -> - Legacy_store.Chain.Allow_forked_chain.known data.global_store chain_id - >>= fun allow_forked_chain -> - Header.read (block_store, genesis_hash) >>=? fun genesis_header -> - let genesis = {Genesis.time; protocol; block = genesis_hash} in - Legacy_store.Chain_data.Current_head.read chain_data_store - >>=? fun current_head -> - Legacy_store.Chain_data.Checkpoint.read chain_data_store - >>=? fun checkpoint -> - Legacy_store.Chain_data.Save_point.read chain_data_store - >>=? fun save_point -> - Legacy_store.Chain_data.Caboose.read chain_data_store >>=? fun caboose -> - try - allocate - ~genesis - ~faked_genesis_hash:(Block_header.hash genesis_header) - ~current_head - ~expiration - ~allow_forked_chain - ~checkpoint - ~chain_id - ~save_point - ~caboose - global_state - data.context_index - chain_data_store - block_store - >>= return - with Not_found -> fail Bad_data_dir - - let locked_read_all global_state data = - Legacy_store.Chain.list data.global_store >>= fun ids -> - List.iter_ep - (fun id -> - locked_read global_state data id >>=? fun chain -> - Chain_id.Table.add data.chains id chain ; - return_unit) - ids - - let read_all state = - Shared.use state.global_data (fun data -> locked_read_all state data) - - let get_opt state id = - Shared.use state.global_data (fun data -> - Lwt.return (Chain_id.Table.find data.chains id)) - - let get_exn state id = - get_opt state id >|= WithExceptions.Option.to_exn ~none:Not_found - - let get state id = - get_opt state id >|= function - | Some v -> Ok v - | None -> error (Unknown_chain id) - - let all state = - Shared.use state.global_data (fun {chains; _} -> - Lwt.return @@ Chain_id.Table.to_seq_values chains) - - let id {chain_id; _} = chain_id - - let genesis {genesis; _} = genesis - - let faked_genesis_hash {faked_genesis_hash; _} = faked_genesis_hash - - let expiration {expiration; _} = expiration - - let allow_forked_chain {allow_forked_chain; _} = allow_forked_chain - - let global_state {global_state; _} = global_state - - let checkpoint chain_state = - Shared.use chain_state.chain_data (fun {checkpoint; _} -> - Lwt.return checkpoint) - - let save_point chain_state = - Shared.use chain_state.chain_data (fun state -> - Lwt.return state.data.save_point) - - let caboose chain_state = - Shared.use chain_state.chain_data (fun state -> - Lwt.return state.data.caboose) - - let purge_loop_full ?(chunk_size = 4000) global_store store ~genesis_hash - block_hash caboose_level = - let do_prune blocks = - Legacy_store.with_atomic_rw global_store @@ fun () -> - List.iter_s (store_header_and_prune_block store) blocks - in - let rec loop block_hash (n_blocks, blocks) = - (if n_blocks >= chunk_size then - do_prune blocks >>= fun () -> Lwt.return (0, []) - else Lwt.return (n_blocks, blocks)) - >>= fun (n_blocks, blocks) -> - Header.read_opt (store, block_hash) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun header -> - if Block_hash.equal block_hash genesis_hash then do_prune blocks - else if header.shell.level = caboose_level then - do_prune (block_hash :: blocks) - else loop header.shell.predecessor (n_blocks + 1, block_hash :: blocks) - in - Header.read_opt (store, block_hash) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun header -> loop header.shell.predecessor (0, []) - - let purge_full chain_state (lvl, hash) = - Shared.use chain_state.global_state.global_data (fun global_data -> - 
Shared.use chain_state.block_store (fun store -> - update_chain_data chain_state (fun _ data -> - purge_loop_full - global_data.global_store - store - ~genesis_hash:chain_state.genesis.block - hash - (fst data.save_point) - >>= fun () -> - let new_data = {data with save_point = (lvl, hash)} in - Lwt.return (Some new_data, ())) - >>= fun () -> - Shared.use chain_state.chain_data (fun data -> - Legacy_store.Chain_data.Save_point.store - data.chain_data_store - (lvl, hash) - >>= fun () -> return_unit))) - - let purge_loop_rolling global_store store ~genesis_hash block_hash limit = - let do_delete blocks = - Legacy_store.with_atomic_rw global_store @@ fun () -> - List.iter_s (delete_block store) blocks - in - let rec prune_loop block_hash limit = - if Block_hash.equal genesis_hash block_hash then Lwt.return block_hash - else if limit = 1 then - Header.read_opt (store, block_hash) >>= function - | None -> assert false (* Should not happen. *) - | Some header -> - store_header_and_prune_block store block_hash >>= fun () -> - delete_loop header.shell.predecessor (0, []) >>= fun () -> - Lwt.return block_hash - else - Header.read_opt (store, block_hash) >>= function - | None -> assert false (* Should not happen. *) - | Some header -> - store_header_and_prune_block store block_hash >>= fun () -> - prune_loop header.shell.predecessor (limit - 1) - and delete_loop block_hash (n_blocks, blocks) = - (if n_blocks >= 4000 then do_delete blocks >>= fun () -> Lwt.return (0, []) - else Lwt.return (n_blocks, blocks)) - >>= fun (n_blocks, blocks) -> - Header.read_opt (store, block_hash) >>= function - | None -> do_delete blocks - | Some header -> - if Block_hash.equal genesis_hash block_hash then do_delete blocks - else - delete_loop - header.shell.predecessor - (n_blocks + 1, block_hash :: blocks) - in - Header.read_opt (store, block_hash) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun header -> - if limit = 0 then - delete_loop header.shell.predecessor (0, []) >>= fun () -> - Lwt.return block_hash - else prune_loop header.shell.predecessor limit - - let purge_rolling chain_state ((lvl, hash) as checkpoint) = - Shared.use chain_state.global_state.global_data (fun global_data -> - Shared.use chain_state.block_store (fun store -> - (Legacy_store.Block.Contents.read_opt (store, hash) >>= function - | None -> fail (Block_contents_not_found hash) - | Some contents -> return contents) - >>=? 
fun contents -> - let max_op_ttl = contents.max_operations_ttl in - let limit = max_op_ttl in - purge_loop_rolling - ~genesis_hash:chain_state.genesis.block - global_data.global_store - store - hash - limit - >>= fun caboose_hash -> - let caboose_level = Int32.sub lvl (Int32.of_int max_op_ttl) in - let caboose = (caboose_level, caboose_hash) in - update_chain_data chain_state (fun _ data -> - let new_data = {data with save_point = checkpoint; caboose} in - Lwt.return (Some new_data, ())) - >>= fun () -> - Shared.use chain_state.chain_data (fun data -> - Legacy_store.Chain_data.Save_point.store - data.chain_data_store - checkpoint - >>= fun () -> - Legacy_store.Chain_data.Caboose.store - data.chain_data_store - caboose - >>= fun () -> return_unit))) - - let set_checkpoint chain_state checkpoint = - Shared.use chain_state.block_store (fun store -> - Shared.use chain_state.chain_data (fun data -> - let head_header = data.data.current_head.header in - let head_hash = data.data.current_head.hash in - Locked_block.is_valid_for_checkpoint - store - head_hash - head_header - checkpoint - >>= fun valid -> - assert valid ; - (* Remove outdated invalid blocks. *) - Legacy_store.Block.Invalid_block.iter store ~f:(fun hash iblock -> - if iblock.level <= checkpoint.shell.level then - Legacy_store.Block.Invalid_block.remove store hash - else Lwt.return_unit) - >>= fun () -> - (* Remove outdated heads and tag invalid branches. *) - ( locked_valid_heads_for_checkpoint store data checkpoint - >>= fun (valid_heads, invalid_heads) -> - tag_invalid_heads - store - data.chain_data_store - invalid_heads - checkpoint.shell.level - >>= fun outdated_invalid_heads -> - if head_header.shell.level < checkpoint.shell.level then - Lwt.return_unit - else - let outdated_valid_heads = - List.filter - (fun (hash, {Block_header.shell; _}) -> - shell.level <= checkpoint.shell.level - && not (Block_hash.equal hash head_hash)) - valid_heads - in - cut_alternate_heads - store - data.chain_data_store - outdated_valid_heads - >>= fun () -> - cut_alternate_heads - store - data.chain_data_store - outdated_invalid_heads ) - >>= fun () -> - (* Store the new checkpoint. *) - Legacy_store.Chain_data.Checkpoint.store - data.chain_data_store - checkpoint - >>= fun () -> - data.checkpoint <- checkpoint ; - (* TODO 'git fsck' in the context.
*) - Lwt.return_unit)) - - let set_checkpoint_then_purge_full chain_state checkpoint = - set_checkpoint chain_state checkpoint >>= fun () -> - let lvl = checkpoint.shell.level in - let hash = Block_header.hash checkpoint in - purge_full chain_state (lvl, hash) - - let set_checkpoint_then_purge_rolling chain_state checkpoint = - set_checkpoint chain_state checkpoint >>= fun () -> - let lvl = checkpoint.shell.level in - let hash = Block_header.hash checkpoint in - purge_rolling chain_state (lvl, hash) - - let acceptable_block chain_state (header : Block_header.t) = - Shared.use chain_state.chain_data (fun chain_data -> - Locked_block.acceptable chain_data header) - - let destroy state chain = - Events.(emit destroy_state (id chain)) >>= fun () -> - Shared.use state.global_data (fun {global_store; chains; _} -> - Chain_id.Table.remove chains (id chain) ; - Legacy_store.Chain.destroy global_store (id chain)) - - let store chain_state = - Shared.use chain_state.global_state.global_data (fun global_data -> - Lwt.return global_data.global_store) -end - -module Block = struct - type t = block = { - chain_state : Chain.t; - hash : Block_hash.t; - header : Block_header.t; - } - - type block = t - - module Header = Header - - let compare b1 b2 = Block_hash.compare b1.hash b2.hash - - let equal b1 b2 = Block_hash.equal b1.hash b2.hash - - let hash {hash; _} = hash - - let header {header; _} = header - - let read_contents block = - Shared.use block.chain_state.block_store (fun store -> - Legacy_store.Block.Contents.read_opt (store, block.hash) >>= function - | None -> fail (Block_contents_not_found block.hash) - | Some contents -> return contents) - - let read_contents_opt block = - Shared.use block.chain_state.block_store (fun store -> - Legacy_store.Block.Contents.read_opt (store, block.hash) >>= function - | None -> Lwt.return_none - | Some contents -> Lwt.return_some contents) - - let header_of_hash chain_state hash = - Shared.use chain_state.block_store (fun store -> - Header.read_opt (store, hash)) - - let metadata b = read_contents b >>=? fun {metadata; _} -> return metadata - - let chain_state {chain_state; _} = chain_state - - let chain_id {chain_state = {chain_id; _}; _} = chain_id - - let shell_header {header = {shell; _}; _} = shell - - let timestamp b = (shell_header b).timestamp - - let fitness b = (shell_header b).fitness - - let level b = (shell_header b).level - - let validation_passes b = (shell_header b).validation_passes - - let message b = read_contents b >>=? fun {message; _} -> return message - - let max_operations_ttl b = - read_contents b >>=? fun {max_operations_ttl; _} -> - return max_operations_ttl - - let last_allowed_fork_level b = - read_contents b >>=? 
fun {last_allowed_fork_level; _} -> - return last_allowed_fork_level - - let is_genesis b = Block_hash.equal b.hash b.chain_state.genesis.block - - let known_valid chain_state hash = - Shared.use chain_state.block_store (fun store -> Header.known (store, hash)) - - let known_invalid chain_state hash = - Shared.use chain_state.block_store (fun store -> - Legacy_store.Block.Invalid_block.known store hash) - - let read_invalid chain_state hash = - Shared.use chain_state.block_store (fun store -> - Legacy_store.Block.Invalid_block.read_opt store hash) - - let list_invalid chain_state = - Shared.use chain_state.block_store (fun store -> - Legacy_store.Block.Invalid_block.fold - store - ~init:[] - ~f:(fun hash {level; errors} acc -> - Lwt.return ((hash, level, errors) :: acc))) - - let unmark_invalid chain_state block = - Shared.use chain_state.block_store (fun store -> - Legacy_store.Block.Invalid_block.known store block >>= fun mem -> - if mem then - Legacy_store.Block.Invalid_block.remove store block >>= return - else fail (Block_not_invalid block)) - - let is_valid_for_checkpoint block checkpoint = - let chain_state = block.chain_state in - Shared.use chain_state.block_store (fun store -> - Locked_block.is_valid_for_checkpoint - store - block.hash - block.header - checkpoint) - - let read_predecessor chain_state ~pred hash = - Shared.use chain_state.block_store (fun store -> - predecessor_n store hash pred >>= fun hash_opt -> - let new_hash_opt = - match hash_opt with - | Some _ as hash_opt -> hash_opt - | None -> - if Block_hash.equal hash chain_state.genesis.block then - Some chain_state.genesis.block - else None - in - match new_hash_opt with - | None -> Lwt.fail Not_found - | Some hash -> ( - Header.read_opt (store, hash) >>= fun header -> - match header with - | Some header -> Lwt.return_some {chain_state; hash; header} - | None -> Lwt.return_none)) - - let read chain_state hash = - Shared.use chain_state.block_store (fun store -> - Header.read (store, hash) >>=? fun header -> - return {chain_state; hash; header}) - - let read_opt chain_state hash = - read chain_state hash >>= function - | Error _ -> Lwt.return_none - | Ok v -> Lwt.return_some v - - let predecessor {chain_state; header; hash; _} = - if Block_hash.equal hash header.shell.predecessor then Lwt.return_none - (* we are at genesis *) - else read_opt chain_state header.shell.predecessor - - let predecessor_n b n = - Shared.use b.chain_state.block_store (fun block_store -> - predecessor_n block_store b.hash n) - - (* EDITED : No context checks *) - let store chain_state block_header block_header_metadata operations - operations_metadata block_metadata_hash ops_metadata_hashes - ({ - context_hash; - timestamp = _; - message; - max_operations_ttl; - last_allowed_fork_level; - } : - Block_validation.validation_store) ~forking_testchain = - let bytes = Block_header.to_bytes block_header in - let hash = Block_header.hash_raw bytes in - fail_unless - Compare.List_length_with.( - operations = block_header.shell.validation_passes) - (error_of_fmt "State.Block.store: invalid operations length") - >>=? fun () -> - fail_unless - Compare.List_length_with.( - operations_metadata = block_header.shell.validation_passes) - (error_of_fmt "State.Block.store: invalid operations_data length") - >>=? 
fun () -> - let inconsistent_failure = - error_of_fmt - "State.Block.store: inconsistent operations and operations_data" - in - (List.for_all2 - ~when_different_lengths:inconsistent_failure - (fun l1 l2 -> Compare.List_lengths.(l1 = l2)) - operations - operations_metadata - |> function - | Ok _ as ok -> ok - | Error err -> error err) - >>?= fun all_have_equal_lengths -> - error_unless all_have_equal_lengths inconsistent_failure >>?= fun () -> - (* let the validator check the consistency of fitness, level, ... *) - Shared.use chain_state.block_store (fun store -> - Legacy_store.Block.Invalid_block.known store hash - >>= fun known_invalid -> - fail_when known_invalid (error_of_fmt "Known invalid") >>=? fun () -> - Legacy_store.Block.Contents.known (store, hash) >>= fun known -> - if known then return_none - else - (* safety check: never ever commit a block that is not compatible - with the current checkpoint. *) - let predecessor = block_header.shell.predecessor in - ( Header.known (store, predecessor) >>= fun valid_predecessor -> - if not valid_predecessor then Lwt.return_false - else - Shared.use chain_state.chain_data (fun chain_data -> - Locked_block.acceptable chain_data block_header) ) - >>= fun acceptable_block -> - fail_unless acceptable_block (Checkpoint_error (hash, None)) - >>=? fun () -> - let commit = context_hash in - Header.read (store, predecessor) >>=? fun pred_block -> - Chain.get_level_indexed_protocol chain_state pred_block - >>= fun protocol -> - (match Registered_protocol.get protocol with - | Some (module Proto) -> return Proto.environment_version - | None -> - fail - (Block_validator_errors.Unavailable_protocol - {block = predecessor; protocol})) - >>=? fun env -> - let contents = - { - header = block_header; - Legacy_store.Block.message; - max_operations_ttl; - last_allowed_fork_level; - context = commit; - metadata = block_header_metadata; - } - in - Legacy_store.Block.Contents.store (store, hash) contents >>= fun () -> - List.iteri_p - (fun i ops -> - Legacy_store.Block.Operation_hashes.store - (store, hash) - i - (List.map Operation.hash ops)) - operations - >>= fun () -> - List.iteri_p - (fun i ops -> - Legacy_store.Block.Operations.store (store, hash) i ops) - operations - >>= fun () -> - List.iteri_p - (fun i ops -> - Legacy_store.Block.Operations_metadata.store (store, hash) i ops) - operations_metadata - >>= fun () -> - (match block_metadata_hash with - | Some block_metadata_hash -> - Legacy_store.Block.Block_metadata_hash.store - (store, hash) - block_metadata_hash - >|= ok - | None -> ( - match env with - | V1 | V2 | V3 | V4 -> - fail @@ Missing_block_metadata_hash predecessor - | V0 -> return_unit)) - >>=? fun () -> - (match ops_metadata_hashes with - | Some ops_metadata_hashes -> - Lwt_list.iteri_s - (fun i hashes -> - Legacy_store.Block.Operations_metadata_hashes.store - (store, hash) - i - hashes) - ops_metadata_hashes - >|= ok - | None -> ( - match env with - | V1 when pred_block.shell.validation_passes > 0 -> - fail @@ Missing_operation_metadata_hashes predecessor - | _ -> return_unit)) - >>=? fun () -> - (* Store predecessors *) - store_predecessors store hash >>= fun () -> - (* Update the chain state.
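-              The freshly stored block replaces its predecessor in the
-              set of known heads.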
*) - Shared.use chain_state.chain_data (fun chain_data -> - let store = chain_data.chain_data_store in - let predecessor = block_header.shell.predecessor in - Legacy_store.Chain_data.Known_heads.remove store predecessor - >>= fun () -> Legacy_store.Chain_data.Known_heads.store store hash) - >>= fun () -> - (if forking_testchain then - Shared.use chain_state.global_state.global_data (fun global_data -> - let genesis = Context.compute_testchain_genesis hash in - Legacy_store.Forking_block_hash.store - global_data.global_store - (Context.compute_testchain_chain_id genesis) - hash) - else Lwt.return_unit) - >>= fun () -> - let block = {chain_state; hash; header = block_header} in - Lwt_watcher.notify chain_state.block_watcher block ; - Lwt_watcher.notify chain_state.global_state.block_watcher block ; - return_some block) - - let remove block = - let hash = block.hash in - let header = block.header in - protect (fun () -> - Shared.use block.chain_state.block_store (fun store -> - Legacy_store.Block.Contents.remove (store, hash) >>= fun () -> - Legacy_store.Block.Operations.remove_all (store, hash) >>= fun () -> - Legacy_store.Block.Operations_metadata.remove_all (store, hash) - >>= fun () -> - Legacy_store.Block.Operation_hashes.remove_all (store, hash) - >>= fun () -> - Shared.use block.chain_state.chain_data (fun chain_data -> - let store = chain_data.chain_data_store in - let predecessor = header.shell.predecessor in - Legacy_store.Chain_data.Known_heads.remove store hash - >>= fun () -> - Legacy_store.Chain_data.Known_heads.store store predecessor - >>= fun () -> - Legacy_store.Chain_data.In_main_branch.remove (store, hash) - >>= fun () -> - Legacy_store.Chain_data.Current_head.read_opt store >>= function - | Some block_hash when block_hash = hash -> - Legacy_store.Chain_data.Current_head.store store predecessor - | Some _ | None -> Lwt.return_unit) - >>= fun () -> return_unit)) - - let store_invalid chain_state block_header errors = - let bytes = Block_header.to_bytes block_header in - let hash = Block_header.hash_raw bytes in - Shared.use chain_state.block_store (fun store -> - Header.known (store, hash) >>= fun known_valid -> - fail_when known_valid (error_of_fmt "Known valid") >>=? 
fun () -> - Legacy_store.Block.Invalid_block.known store hash - >>= fun known_invalid -> - if known_invalid then return_false - else - Legacy_store.Block.Invalid_block.store - store - hash - {level = block_header.shell.level; errors} - >>= fun () -> return_true) - - let watcher (state : chain_state) = - Lwt_watcher.create_stream state.block_watcher - - let compute_operation_path hashes = - let list_hashes = List.map Operation_list_hash.compute hashes in - Operation_list_list_hash.compute_path list_hashes - - let operation_hashes {chain_state; hash; header} i = - if i < 0 || header.shell.validation_passes <= i then - invalid_arg "State.Block.operations" ; - Shared.use chain_state.block_store (fun store -> - List.map_p - (fun n -> - Legacy_store.Block.Operation_hashes.read_opt (store, hash) n - >|= WithExceptions.Option.get ~loc:__LOC__) - (0 -- (header.shell.validation_passes - 1)) - >>= fun hashes -> - let path = compute_operation_path hashes in - Lwt.return - ( WithExceptions.Option.to_exn ~none:Not_found @@ List.nth hashes i, - path i )) - - let all_operation_hashes {chain_state; hash; header; _} = - Shared.use chain_state.block_store (fun store -> - List.map_p - (fun i -> - Legacy_store.Block.Operation_hashes.read_opt (store, hash) i - >|= WithExceptions.Option.get ~loc:__LOC__) - (0 -- (header.shell.validation_passes - 1))) - - let operations {chain_state; hash; header; _} i = - if i < 0 || header.shell.validation_passes <= i then - invalid_arg "State.Block.operations" ; - Shared.use chain_state.block_store (fun store -> - List.map_p - (fun n -> - Legacy_store.Block.Operation_hashes.read_opt (store, hash) n - >|= WithExceptions.Option.get ~loc:__LOC__) - (0 -- (header.shell.validation_passes - 1)) - >>= fun hashes -> - let path = compute_operation_path hashes in - Legacy_store.Block.Operations.read_opt (store, hash) i - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun ops -> Lwt.return (ops, path i)) - - let operations_metadata {chain_state; hash; header; _} i = - if i < 0 || header.shell.validation_passes <= i then - invalid_arg "State.Block.operations_metadata" ; - Shared.use chain_state.block_store (fun store -> - Legacy_store.Block.Operations_metadata.read_opt (store, hash) i - >|= WithExceptions.Option.get ~loc:__LOC__) - - let all_operations {chain_state; hash; header; _} = - Shared.use chain_state.block_store (fun store -> - List.map_p - (fun i -> - Legacy_store.Block.Operations.read_opt (store, hash) i - >|= WithExceptions.Option.get ~loc:__LOC__) - (0 -- (header.shell.validation_passes - 1))) - - let all_operations_metadata {chain_state; hash; header; _} = - Shared.use chain_state.block_store (fun store -> - List.map_p - (fun i -> - Legacy_store.Block.Operations_metadata.read_opt (store, hash) i - >|= WithExceptions.Option.get ~loc:__LOC__) - (0 -- (header.shell.validation_passes - 1))) - - let metadata_hash {chain_state; hash; _} = - Shared.use chain_state.block_store (fun store -> - Legacy_store.Block.Block_metadata_hash.read_opt (store, hash)) - - let operations_metadata_hashes {chain_state; hash; _} i = - Shared.use chain_state.block_store (fun store -> - Legacy_store.Block.Operations_metadata_hashes.read_opt (store, hash) i) - - let all_operations_metadata_hashes {chain_state; hash; header; _} = - Shared.use chain_state.block_store (fun store -> - if header.shell.validation_passes = 0 then Lwt.return_none - else - Legacy_store.Block.Operations_metadata_hashes.known (store, hash) 0 - >>= function - | false -> Lwt.return_none - | true -> - List.map_p - (fun i -> - 
Legacy_store.Block.Operations_metadata_hashes.read_opt - (store, hash) - i - >|= WithExceptions.Option.get ~loc:__LOC__) - (0 -- (header.shell.validation_passes - 1)) - >|= fun hashes -> Some hashes) - - let all_operations_metadata_hash block = - all_operations_metadata_hashes block - >|= fun predecessor_ops_metadata_hashes -> - Option.map - (fun hashes -> - List.map Operation_metadata_list_hash.compute hashes - |> Operation_metadata_list_list_hash.compute) - predecessor_ops_metadata_hashes - - let context_exn {chain_state; hash; _} = - Lwt.catch - (fun () -> - Shared.use chain_state.block_store (fun block_store -> - Legacy_store.Block.Contents.read_opt (block_store, hash)) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun {context = commit; _} -> - Shared.use chain_state.context_index (fun context_index -> - Context.checkout_exn context_index commit)) - (fun _ -> Lwt.fail Not_found) - - let context_opt {chain_state; hash; _} = - Shared.use chain_state.block_store (fun block_store -> - Legacy_store.Block.Contents.read_opt (block_store, hash)) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun {context = commit; _} -> - Shared.use chain_state.context_index (fun context_index -> - Context.checkout context_index commit) - - let context block = - context_opt block >>= function - | Some context -> return context - | None -> fail (Block_contents_not_found block.hash) - - let context_exists {chain_state; hash; _} = - Shared.use chain_state.block_store (fun block_store -> - Legacy_store.Block.Contents.read_opt (block_store, hash)) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun {context = commit; _} -> - Shared.use chain_state.context_index (fun context_index -> - Context.exists context_index commit) - - let protocol_hash ({chain_state; _} as block) = - Chain.save_point chain_state >>= fun (save_point_level, _) -> - if Compare.Int32.(level block < save_point_level) then - Chain.get_level_indexed_protocol chain_state block.header >>= return - else - context block >>=? 
fun context -> - protect (fun () -> Context.get_protocol context >>= return) - - let protocol_hash_exn ({chain_state; _} as block) = - Chain.save_point chain_state >>= fun (save_point_level, _) -> - if Compare.Int32.(level block < save_point_level) then - Chain.get_level_indexed_protocol chain_state block.header - else context_exn block >>= fun context -> Context.get_protocol context - - let protocol_level block = block.header.shell.proto_level - - let test_chain block = - context_exn block >>= fun context -> - Context.get_test_chain context >>= fun status -> - let lookup_testchain genesis = - let chain_id = Context.compute_testchain_chain_id genesis in - (* otherwise, look in the temporary table *) - Shared.use block.chain_state.global_state.global_data (fun global_data -> - Legacy_store.Forking_block_hash.read_opt - global_data.global_store - chain_id) - >>= function - | Some forking_block_hash -> - read_opt block.chain_state forking_block_hash >>= fun forking_block -> - Lwt.return (status, forking_block) - | None -> Lwt.return (status, None) - in - match status with - | Running {genesis; _} -> lookup_testchain genesis - | Forking _ -> Lwt.return (status, Some block) - | Not_running -> Lwt.return (status, None) - - let known chain_state hash = - Shared.use chain_state.block_store (fun store -> - Header.known (store, hash) >>= fun known -> - if known then Lwt.return_true - else Legacy_store.Block.Invalid_block.known store hash) - - let block_validity chain_state block : Block_locator.validity Lwt.t = - known chain_state block >>= function - | false -> - if Block_hash.equal block (Chain.faked_genesis_hash chain_state) then - Lwt.return Block_locator.Known_valid - else Lwt.return Block_locator.Unknown - | true -> ( - known_invalid chain_state block >>= function - | true -> Lwt.return Block_locator.Known_invalid - | false -> Lwt.return Block_locator.Known_valid) - - let known_ancestor chain_state locator = - Shared.use chain_state.global_state.global_data (fun {global_store; _} -> - Legacy_store.Configuration.History_mode.read_opt global_store - >|= WithExceptions.Option.get ~loc:__LOC__) - >>= fun history_mode -> - Block_locator.unknown_prefix ~is_known:(block_validity chain_state) locator - >>= function - | (Known_valid, prefix_locator) -> Lwt.return_some prefix_locator - | (Known_invalid, _) -> Lwt.return_none - | (Unknown, _) -> ( - match history_mode with - | Archive -> Lwt.return_none - | Rolling | Full -> Lwt.return_some locator) - - (* Hypothesis : genesis' predecessor is itself. 
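-     Consequently, a block whose predecessor is the block itself is
-     treated as genesis and gets no RPC directory.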
*) - let get_rpc_directory ({chain_state; _} as block) = - read_opt chain_state block.header.shell.predecessor >>= function - | None -> Lwt.return_none (* assert false *) - | Some pred when equal pred block -> Lwt.return_none (* genesis *) - | Some pred -> ( - protocol_hash_exn pred >>= fun protocol -> - match - Protocol_hash.Table.find chain_state.block_rpc_directories protocol - with - | None -> Lwt.return_none - | Some map -> - protocol_hash_exn block >>= fun next_protocol -> - Lwt.return (Protocol_hash.Map.find next_protocol map)) - - let set_rpc_directory ({chain_state; _} as block) dir = - read_opt chain_state block.header.shell.predecessor - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun pred -> - protocol_hash_exn block >>= fun next_protocol -> - Chain.save_point chain_state >>= fun (save_point_level, _) -> - (if Compare.Int32.(level pred < save_point_level) then - Chain.get_level_indexed_protocol chain_state (header pred) - else protocol_hash_exn pred) - >>= fun protocol -> - let map = - Option.value - ~default:Protocol_hash.Map.empty - (Protocol_hash.Table.find chain_state.block_rpc_directories protocol) - in - Protocol_hash.Table.replace - chain_state.block_rpc_directories - protocol - (Protocol_hash.Map.add next_protocol dir map) ; - return_unit - - let get_header_rpc_directory chain_state header = - Shared.use chain_state.block_store (fun block_store -> - Header.read_opt (block_store, header.Block_header.shell.predecessor) - >>= function - | None -> Lwt.return_none (* caboose *) - | Some pred when Block_header.equal pred header -> - Lwt.return_none (* genesis *) - | Some pred -> ( - Chain.get_level_indexed_protocol chain_state header - >>= fun protocol -> - match - Protocol_hash.Table.find - chain_state.header_rpc_directories - protocol - with - | None -> Lwt.return_none - | Some map -> - Chain.get_level_indexed_protocol chain_state pred - >>= fun next_protocol -> - Lwt.return (Protocol_hash.Map.find next_protocol map))) - - let set_header_rpc_directory chain_state header dir = - Shared.use chain_state.block_store (fun block_store -> - Header.read_opt (block_store, header.Block_header.shell.predecessor) - >>= function - | None -> assert false - | Some pred -> - Chain.get_level_indexed_protocol chain_state header - >>= fun next_protocol -> - Chain.get_level_indexed_protocol chain_state pred - >>= fun protocol -> - let map = - Option.value - ~default:Protocol_hash.Map.empty - (Protocol_hash.Table.find - chain_state.header_rpc_directories - protocol) - in - Protocol_hash.Table.replace - chain_state.header_rpc_directories - protocol - (Protocol_hash.Map.add next_protocol dir map) ; - Lwt.return_unit) -end - -let watcher (state : global_state) = - Lwt_watcher.create_stream state.block_watcher - -let read_block {global_data; _} hash = - Shared.use global_data (fun {chains; _} -> - Chain_id.Table.fold_s - (fun _chain_id chain_state acc -> - match acc with - | Some _ -> Lwt.return acc - | None -> ( - Block.read_opt chain_state hash >>= function - | None -> Lwt.return acc - | Some block -> Lwt.return_some block)) - chains - None) - -let read_block_exn t hash = - read_block t hash >>= function - | None -> Lwt.fail Not_found - | Some b -> Lwt.return b - -let update_testchain block ~testchain_state = - update_chain_data block.chain_state (fun _ chain_data -> - Lwt.return - (Some {chain_data with test_chain = Some testchain_state.chain_id}, ())) - -let fork_testchain block chain_id genesis_hash genesis_header protocol - expiration = - Shared.use 
block.chain_state.global_state.global_data (fun data -> - let chain_store = Legacy_store.Chain.get data.global_store chain_id in - let block_store = Legacy_store.Block.get chain_store in - Legacy_store.Block.Contents.store - (block_store, genesis_hash) - { - header = genesis_header; - Legacy_store.Block.message = Some "Genesis"; - max_operations_ttl = 0; - context = genesis_header.shell.context; - metadata = Bytes.create 0; - last_allowed_fork_level = 0l; - } - >>= fun () -> - let genesis = - { - Genesis.block = genesis_hash; - time = genesis_header.shell.timestamp; - protocol; - } - in - Chain.locked_create - block.chain_state.global_state - data - chain_id - ~expiration - genesis - genesis_header - >>= fun testchain_state -> - Legacy_store.Chain.Protocol_info.store - chain_store - genesis_header.shell.proto_level - (protocol, genesis_header.shell.level) - >>= fun () -> - update_testchain block ~testchain_state >>= fun () -> - return testchain_state) - -let best_known_head_for_checkpoint chain_state checkpoint = - Shared.use chain_state.block_store (fun store -> - Shared.use chain_state.chain_data (fun data -> - let head_hash = data.data.current_head.hash in - let head_header = data.data.current_head.header in - Locked_block.is_valid_for_checkpoint - store - head_hash - head_header - checkpoint - >>= fun valid -> - if valid then Lwt.return data.data.current_head - else - let find_valid_predecessor hash = - Header.read_opt (store, hash) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun header -> - if Compare.Int32.(header.shell.level < checkpoint.shell.level) - then Lwt.return {hash; chain_state; header} - else - predecessor_n - store - hash - (1 - + (Int32.to_int - @@ Int32.sub header.shell.level checkpoint.shell.level)) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun pred -> - Header.read_opt (store, pred) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun pred_header -> - Lwt.return {hash = pred; chain_state; header = pred_header} - in - Legacy_store.Chain_data.Known_heads.read_all data.chain_data_store - >>= fun heads -> - Header.read_opt (store, chain_state.genesis.block) - >|= WithExceptions.Option.get ~loc:__LOC__ - >>= fun genesis_header -> - let genesis = - { - hash = chain_state.genesis.block; - chain_state; - header = genesis_header; - } - in - Block_hash.Set.fold_s - (fun head best -> - find_valid_predecessor head >|= fun pred -> - if - Fitness.( - pred.header.shell.fitness > best.header.shell.fitness) - then pred - else best) - heads - genesis)) - -module Protocol = struct - include Protocol - - let known global_state hash = - Shared.use global_state.protocol_store (fun store -> - Legacy_store.Protocol.Contents.known store hash) - - let read global_state hash = - Shared.use global_state.protocol_store (fun store -> - Legacy_store.Protocol.Contents.read store hash) - - let read_opt global_state hash = - Shared.use global_state.protocol_store (fun store -> - Legacy_store.Protocol.Contents.read_opt store hash) - - let read_raw global_state hash = - Shared.use global_state.protocol_store (fun store -> - Legacy_store.Protocol.RawContents.read (store, hash)) - - let read_raw_opt global_state hash = - Shared.use global_state.protocol_store (fun store -> - Legacy_store.Protocol.RawContents.read_opt (store, hash)) - - let store global_state p = - let bytes = Protocol.to_bytes p in - let hash = Protocol.hash_raw bytes in - Shared.use global_state.protocol_store (fun store -> - Legacy_store.Protocol.Contents.known store hash >>= fun known -> - if known then 
Lwt.return_none - else - Legacy_store.Protocol.RawContents.store (store, hash) bytes - >>= fun () -> - Lwt_watcher.notify global_state.protocol_watcher hash ; - Lwt.return_some hash) - - let remove global_state hash = - Shared.use global_state.protocol_store (fun store -> - Legacy_store.Protocol.Contents.known store hash >>= fun known -> - if known then Lwt.return_false - else - Legacy_store.Protocol.Contents.remove store hash >>= fun () -> - Lwt.return_true) - - let list global_state = - Shared.use global_state.protocol_store (fun store -> - Legacy_store.Protocol.Contents.fold_keys - store - ~init:Protocol_hash.Set.empty - ~f:(fun x acc -> Lwt.return (Protocol_hash.Set.add x acc))) - - let watcher (state : global_state) = - Lwt_watcher.create_stream state.protocol_watcher -end - -module Current_mempool = struct - let set chain_state ~head mempool = - update_chain_data chain_state (fun _chain_data_store data -> - if Block_hash.equal head (Block.hash data.current_head) then - Lwt.return (Some {data with current_mempool = mempool}, ()) - else Lwt.return (None, ())) - - let get chain_state = - read_chain_data chain_state (fun _chain_data_store data -> - Lwt.return (Block.header data.current_head, data.current_mempool)) -end - -let may_create_chain ~commit_genesis state chain_id genesis = - Chain.get state chain_id >>= function - | Ok chain -> return chain - | Error _ -> - Chain.create - ~commit_genesis - ~allow_forked_chain:true - state - genesis - chain_id - -let read global_store context_index main_chain = - let global_data = - {chains = Chain_id.Table.create 17; global_store; context_index} - in - let state = - { - global_data = Shared.create global_data; - protocol_store = Shared.create @@ Legacy_store.Protocol.get global_store; - main_chain; - protocol_watcher = Lwt_watcher.create_input (); - block_watcher = Lwt_watcher.create_input (); - } - in - Chain.read_all state >>=? 
fun () -> return state - -(* FIXME: this should not be hard-coded *) -let max_locator_size = 200 - -let compute_locator_from_hash chain_state ?(max_size = max_locator_size) - ?min_level (head_hash, head_header) seed = - Shared.use chain_state.block_store (fun block_store -> - read_chain_data chain_state (fun _ chain_data -> - match min_level with - | None -> Lwt.return chain_data.caboose - | Some level -> ( - let head_level = head_header.Block_header.shell.level in - let distance = Int32.sub head_level level in - predecessor_n block_store head_hash (Int32.to_int distance) - >>= function - | None -> Lwt.return chain_data.caboose - | Some hash -> Lwt.return (level, hash))) - >>= fun (_lvl, caboose) -> - let get_predecessor = - match min_level with - | None -> predecessor_n block_store - | Some min_level -> ( - fun block_hash distance -> - predecessor_n block_store block_hash distance >>= function - | None -> Lwt.return_none - | Some pred_hash -> ( - Header.read_opt (block_store, pred_hash) >>= function - | None -> Lwt.return_none - | Some pred_header - when Compare.Int32.(pred_header.shell.level < min_level) -> - Lwt.return_none - | Some _ -> Lwt.return_some pred_hash)) - in - Block_locator.compute - ~get_predecessor - ~caboose - ~size:max_size - head_hash - head_header - seed) - -let compute_locator chain ?max_size head seed = - compute_locator_from_hash chain ?max_size (head.hash, Block.header head) seed - -let compute_protocol_locator chain_state ?max_size ~proto_level seed = - Chain.store chain_state >>= fun global_store -> - let chain_store = Legacy_store.Chain.get global_store chain_state.chain_id in - read_chain_data chain_state (fun _chain_store chain_data -> - Legacy_store.Chain.Protocol_info.read_opt chain_store proto_level - >>= function - | None -> Lwt.return_none - | Some (_protocol_hash, block_activation_level) -> ( - (* proto level's lower bound found, now retrieving the upper bound *) - let head_proto_level = Block.protocol_level chain_data.current_head in - if Compare.Int.(proto_level = head_proto_level) then - Lwt.return_some - ( block_activation_level, - Block. 
- (hash chain_data.current_head, header chain_data.current_head) - ) - else - Legacy_store.Chain.Protocol_info.read_opt - chain_store - (succ proto_level) - >>= function - | None -> Lwt.return_none - | Some (_, next_activation_level) -> ( - let last_level_in_protocol = - Int32.(pred next_activation_level) - in - let delta = - Int32.( - sub - (Block.level chain_data.current_head) - last_level_in_protocol) - in - Shared.use chain_state.block_store (fun block_store -> - predecessor_n - block_store - (Block.hash chain_data.current_head) - (Int32.to_int delta)) - >>= function - | None -> Lwt.return_none - | Some pred_hash -> - Shared.use chain_state.block_store (fun block_store -> - Header.read_opt (block_store, pred_hash) >>= function - | None -> Lwt.return_none - | Some pred_header -> - Lwt.return_some - (block_activation_level, (pred_hash, pred_header))) - ))) - >>= function - | None -> Lwt.return_none - | Some (block_activation_level, upper_block) -> - compute_locator_from_hash - chain_state - ?max_size - ~min_level:block_activation_level - upper_block - seed - >>= Lwt.return_some - -type error += - | Incorrect_legacy_history_mode_switch of { - previous_mode : History_mode.Legacy.t; - next_mode : History_mode.Legacy.t; - } - -let () = - register_error_kind - `Permanent - ~id:"node_config_file.incorrect_legacy_history_mode_switch" - ~title:"Incorrect legacy history mode switch" - ~description:"Incorrect legacy history mode switch." - ~pp:(fun ppf (prev, next) -> - Format.fprintf - ppf - "@[cannot switch from legacy history mode %a mode to %a mode@]" - History_mode.Legacy.pp - prev - History_mode.Legacy.pp - next) - (Data_encoding.obj2 - (Data_encoding.req "previous_mode" History_mode.Legacy.encoding) - (Data_encoding.req "next_mode" History_mode.Legacy.encoding)) - (function - | Incorrect_legacy_history_mode_switch x -> - Some (x.previous_mode, x.next_mode) - | _ -> None) - (fun (previous_mode, next_mode) -> - Incorrect_legacy_history_mode_switch {previous_mode; next_mode}) - -let init ?patch_context ?commit_genesis ?(store_mapsize = 40_960_000_000L) - ~store_root ~context_root ?history_mode ?(readonly = false) - (genesis : Genesis.t) = - Legacy_store.init ~mapsize:store_mapsize store_root >>=? fun global_store -> - (match commit_genesis with - | Some commit_genesis -> - Context.init ~readonly:true ?patch_context context_root - >>= fun context_index -> Lwt.return (context_index, commit_genesis) - | None -> - Context.init ~readonly ?patch_context context_root - >>= fun context_index -> - let commit_genesis ~chain_id = - Context.commit_genesis - context_index - ~chain_id - ~time:genesis.time - ~protocol:genesis.protocol - in - Lwt.return (context_index, commit_genesis)) - >>= fun (context_index, commit_genesis) -> - let chain_id = Chain_id.of_block_hash genesis.Genesis.block in - read global_store context_index chain_id >>=? fun state -> - may_create_chain ~commit_genesis state chain_id genesis - >>=? 
fun main_chain_state -> - (Legacy_store.Configuration.History_mode.read_opt global_store >>= function - | None -> - let mode = Option.value ~default:History_mode.Legacy.Full history_mode in - Legacy_store.Configuration.History_mode.store global_store mode - >>= fun () -> return mode - | Some previous_history_mode -> ( - match history_mode with - | None -> return previous_history_mode - | Some history_mode -> - if history_mode <> previous_history_mode then - fail - (Incorrect_legacy_history_mode_switch - { - previous_mode = previous_history_mode; - next_mode = history_mode; - }) - else return history_mode)) - >>=? fun history_mode -> - return (state, main_chain_state, context_index, history_mode) - -let history_mode {global_data; _} = - Shared.use global_data (fun {global_store; _} -> - Legacy_store.Configuration.History_mode.read_opt global_store - >|= WithExceptions.Option.get ~loc:__LOC__) - -let close {global_data; _} = - Shared.use global_data (fun {global_store; context_index; _} -> - Legacy_store.close global_store ; - Context.close context_index) diff --git a/src/lib_store/legacy_store/legacy_state.mli b/src/lib_store/legacy_store/legacy_state.mli deleted file mode 100644 index 6adbea6e77b0..000000000000 --- a/src/lib_store/legacy_store/legacy_state.mli +++ /dev/null @@ -1,457 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* Copyright (c) 2020 Metastate AG *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -(** Tezos Shell - Abstraction over all the disk storage. - - It encapsulates access to: - - - the index of validation contexts; and - - the persistent state of the node: - - the blockchain and its alternate heads; - - the pool of pending operations of a chain. *) - -type t - -type global_state = t - -(** {2 Network} *) - -(** Data specific to a given chain (e.g. the main chain or the current - test chain). *) -module Chain : sig - type t - - type chain_state = t - - (** Initialize a chain for a given [genesis]. By default, - the chain does not accept forked test chains. When - [~allow_forked_chain:true] is provided, test chains are allowed.
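-
-      A minimal usage sketch (illustrative only: the [state], [genesis]
-      and [commit_genesis] values are assumed to be obtained elsewhere,
-      e.g. through [init]):
-      {[
-        let chain_id = Chain_id.of_block_hash genesis.Genesis.block in
-        create state ~commit_genesis genesis chain_id
-      ]}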
*) - val create : - global_state -> - ?allow_forked_chain:bool -> - commit_genesis:(chain_id:Chain_id.t -> Context_hash.t tzresult Lwt.t) -> - Genesis.t -> - Chain_id.t -> - chain_state tzresult Lwt.t - - (** Look up a chain by the hash of its genesis block. *) - val get : global_state -> Chain_id.t -> chain_state tzresult Lwt.t - - val get_opt : global_state -> Chain_id.t -> chain_state option Lwt.t - - val get_exn : global_state -> Chain_id.t -> chain_state Lwt.t - - val main : global_state -> Chain_id.t - - val test : chain_state -> Chain_id.t option Lwt.t - - (** Returns all the known chains. *) - val all : global_state -> chain_state Seq.t Lwt.t - - (** Destroy a chain: this completely removes from the local storage all - the data associated with the chain (this includes blocks and - operations). *) - val destroy : global_state -> chain_state -> unit Lwt.t - - (** Various accessors. *) - val id : chain_state -> Chain_id.t - - val genesis : chain_state -> Genesis.t - - val global_state : chain_state -> global_state - - (** Hash of the faked block header of the genesis block. *) - val faked_genesis_hash : chain_state -> Block_hash.t - - (** Return the expiration timestamp of a test chain. *) - val expiration : chain_state -> Time.Protocol.t option - - val allow_forked_chain : chain_state -> bool - - val checkpoint : chain_state -> Block_header.t Lwt.t - - val save_point : chain_state -> (Int32.t * Block_hash.t) Lwt.t - - val caboose : chain_state -> (Int32.t * Block_hash.t) Lwt.t - - val store : chain_state -> Legacy_store.t Lwt.t - - (** Update the current checkpoint. The current head should be - consistent (i.e. it should either have a lower level or pass - through the checkpoint). In the process all the blocks from - invalid alternate heads are removed from the disk, either - completely (when `level <= checkpoint`) or still tagged as - invalid (when `level > checkpoint`). *) - val set_checkpoint : chain_state -> Block_header.t -> unit Lwt.t - - (** Apply [set_checkpoint] then [purge_full] (see {!History_mode.Legacy.t}). *) - val set_checkpoint_then_purge_full : - chain_state -> Block_header.t -> unit tzresult Lwt.t - - (** Apply [set_checkpoint] then [purge_rolling] (see {!History_mode.Legacy.t}). *) - val set_checkpoint_then_purge_rolling : - chain_state -> Block_header.t -> unit tzresult Lwt.t - - (** Check that a block is compatible with the current checkpoint. - This function assumes that the predecessor is known valid. *) - val acceptable_block : chain_state -> Block_header.t -> bool Lwt.t - - (** List all the indexed protocols in the chain. The resulting list - contains elements of the form [(proto_level, (protocol_hash, - activation_level))]. *) - val all_indexed_protocols : - chain_state -> (int * (Protocol_hash.t * int32)) list Lwt.t - - (** Get the level indexed chain protocol store for the given header. *) - val get_level_indexed_protocol : - chain_state -> Block_header.t -> Protocol_hash.t Lwt.t - - (** Update the level indexed chain protocol store so that the block can easily access - its corresponding protocol hash from the protocol level in its header. - Also stores the transition block level.
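-
-      A hedged sketch of a call site (every name except the function
-      itself is an assumption, not part of this interface):
-      {[
-        update_level_indexed_protocol_store
-          chain_state
-          (id chain_state)
-          block_header.Block_header.shell.proto_level
-          new_protocol_hash
-          block_header
-      ]}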
- *) - val update_level_indexed_protocol_store : - chain_state -> - Chain_id.t -> - int -> - Protocol_hash.t -> - Block_header.t -> - unit Lwt.t -end - -(** {2 Block database} *) - -type error += Block_not_found of Block_hash.t - -type error += Block_contents_not_found of Block_hash.t - -module Block : sig - type t = {chain_state : Chain.t; hash : Block_hash.t; header : Block_header.t} - - type block = t - - (** Abstract view over block header storage. - This module aims to abstract over block header's [read], [read_opt] and [known] - functions by calling the adequate function depending on the block being pruned or not. *) - module Header : sig - val read : - Legacy_store.Block.store * Block_hash.t -> Block_header.t tzresult Lwt.t - - val read_opt : - Legacy_store.Block.store * Block_hash.t -> Block_header.t option Lwt.t - - val known : Legacy_store.Block.store * Block_hash.t -> bool Lwt.t - end - - val known : Chain.t -> Block_hash.t -> bool Lwt.t - - val known_valid : Chain.t -> Block_hash.t -> bool Lwt.t - - val known_invalid : Chain.t -> Block_hash.t -> bool Lwt.t - - val read_invalid : - Chain.t -> Block_hash.t -> Legacy_store.Block.invalid_block option Lwt.t - - val list_invalid : Chain.t -> (Block_hash.t * int32 * error list) list Lwt.t - - val unmark_invalid : Chain.t -> Block_hash.t -> unit tzresult Lwt.t - - val read : Chain.t -> Block_hash.t -> t tzresult Lwt.t - - val read_contents_opt : t -> Legacy_store.Block.contents option Lwt.t - - val read_opt : Chain.t -> Block_hash.t -> t option Lwt.t - - (** Will return the full block if the block has never been cleaned - (all blocks for nodes whose history-mode is set to archive), only - the header for nodes below the save point (nodes in full or - rolling history-mode). Will fail with `Not_found` if the given - hash is unknown. *) - val read_predecessor : Chain.t -> pred:int -> Block_hash.t -> t option Lwt.t - - val store : - Chain.t -> - Block_header.t -> - Bytes.t -> - Operation.t list list -> - Bytes.t list list -> - Block_metadata_hash.t option -> - Operation_metadata_hash.t list list option -> - Block_validation.validation_store -> - forking_testchain:bool -> - block option tzresult Lwt.t - - val store_invalid : - Chain.t -> Block_header.t -> error list -> bool tzresult Lwt.t - - (** [remove block] deletes every occurrence of [block] in the - different stores. 
If [block] is the current head, the head is - also backtracked to the [block] predecessor *) - val remove : t -> unit tzresult Lwt.t - - val compare : t -> t -> int - - val equal : t -> t -> bool - - val hash : t -> Block_hash.t - - val header : t -> Block_header.t - - val header_of_hash : Chain.t -> Block_hash.t -> Block_header.t option Lwt.t - - val shell_header : t -> Block_header.shell_header - - val timestamp : t -> Time.Protocol.t - - val fitness : t -> Fitness.t - - val validation_passes : t -> int - - val chain_id : t -> Chain_id.t - - val chain_state : t -> Chain.t - - val level : t -> Int32.t - - val message : t -> string option tzresult Lwt.t - - val max_operations_ttl : t -> int tzresult Lwt.t - - val metadata : t -> Bytes.t tzresult Lwt.t - - val last_allowed_fork_level : t -> Int32.t tzresult Lwt.t - - val is_genesis : t -> bool - - val predecessor : t -> t option Lwt.t - - val predecessor_n : t -> int -> Block_hash.t option Lwt.t - - val is_valid_for_checkpoint : t -> Block_header.t -> bool Lwt.t - - val context : t -> Context.t tzresult Lwt.t - - val context_opt : t -> Context.t option Lwt.t - - val context_exn : t -> Context.t Lwt.t - - val context_exists : t -> bool Lwt.t - - val protocol_hash : t -> Protocol_hash.t tzresult Lwt.t - - val protocol_hash_exn : t -> Protocol_hash.t Lwt.t - - val test_chain : t -> (Test_chain_status.t * t option) Lwt.t - - val protocol_level : t -> int - - val operation_hashes : - t -> int -> (Operation_hash.t list * Operation_list_list_hash.path) Lwt.t - - val all_operation_hashes : t -> Operation_hash.t list list Lwt.t - - val operations : - t -> int -> (Operation.t list * Operation_list_list_hash.path) Lwt.t - - val all_operations : t -> Operation.t list list Lwt.t - - val operations_metadata : t -> int -> Bytes.t list Lwt.t - - val all_operations_metadata : t -> Bytes.t list list Lwt.t - - val metadata_hash : t -> Block_metadata_hash.t option Lwt.t - - val operations_metadata_hashes : - t -> int -> Operation_metadata_hash.t list option Lwt.t - - val all_operations_metadata_hashes : - t -> Operation_metadata_hash.t list list option Lwt.t - - val all_operations_metadata_hash : - t -> Operation_metadata_list_list_hash.t option Lwt.t - - val watcher : Chain.t -> block Lwt_stream.t * Lwt_watcher.stopper - - (** [known_ancestor chain_state locator] computes the unknown prefix in - the [locator] according to [chain_state]. - It either returns: - - [Some (h, hist)] when we find a valid block, where [hist] - is the unknown prefix, ending with the first valid block found. - - [Some (h, hist)] when we don't find any block known valid nor invalid - and the node runs in full or rolling mode. In this case - [(h, hist)] is the given [locator]. - - [None] when the node runs in archive history mode and - we find an invalid block or no valid block in the [locator]. - - [None] when the node runs in full or rolling mode and we find - an invalid block in the [locator]. 
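-
-      Illustrative dispatch on the result ([locator] comes from a peer;
-      [fetch] and [reject] are hypothetical continuations):
-      {[
-        known_ancestor chain_state locator >>= function
-        | Some unknown_prefix -> fetch unknown_prefix
-        | None -> reject ()
-      ]}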
*) - val known_ancestor : - Chain.t -> Block_locator.t -> Block_locator.t option Lwt.t - - val get_rpc_directory : t -> t RPC_directory.t option Lwt.t - - val set_rpc_directory : t -> t RPC_directory.t -> unit tzresult Lwt.t - - val get_header_rpc_directory : - Chain.t -> - Block_header.t -> - (Chain.t * Block_hash.t * Block_header.t) RPC_directory.t option Lwt.t - - val set_header_rpc_directory : - Chain.t -> - Block_header.t -> - (Chain.t * Block_hash.t * Block_header.t) RPC_directory.t -> - unit Lwt.t -end - -val read_block : global_state -> Block_hash.t -> Block.t option Lwt.t - -val read_block_exn : global_state -> Block_hash.t -> Block.t Lwt.t - -val watcher : t -> Block.t Lwt_stream.t * Lwt_watcher.stopper - -(** Computes the block with the best fitness amongst the known blocks - which are compatible with the given checkpoint. *) -val best_known_head_for_checkpoint : Chain.t -> Block_header.t -> Block.t Lwt.t - -val update_testchain : Block.t -> testchain_state:Chain.t -> unit Lwt.t - -val fork_testchain : - Block.t -> - Chain_id.t -> - Block_hash.t -> - Block_header.t -> - Protocol_hash.t -> - Time.Protocol.t -> - Chain.t tzresult Lwt.t - -type chain_data = { - current_head : Block.t; - current_mempool : Mempool.t; - live_blocks : Block_hash.Set.t; - live_operations : Operation_hash.Set.t; - test_chain : Chain_id.t option; - save_point : Int32.t * Block_hash.t; - caboose : Int32.t * Block_hash.t; -} - -val read_chain_data : - Chain.t -> - (Legacy_store.Chain_data.store -> chain_data -> 'a Lwt.t) -> - 'a Lwt.t - -val update_chain_data : - Chain.t -> - (Legacy_store.Chain_data.store -> - chain_data -> - (chain_data option * 'a) Lwt.t) -> - 'a Lwt.t - -(** {2 Protocol database} *) - -module Protocol : sig - include module type of struct - include Protocol - end - - (** Is a value stored in the local database ? *) - val known : global_state -> Protocol_hash.t -> bool Lwt.t - - (** Read a value in the local database. *) - val read : global_state -> Protocol_hash.t -> Protocol.t tzresult Lwt.t - - val read_opt : global_state -> Protocol_hash.t -> Protocol.t option Lwt.t - - (** Read a value in the local database (without parsing). *) - val read_raw : global_state -> Protocol_hash.t -> Bytes.t tzresult Lwt.t - - val read_raw_opt : global_state -> Protocol_hash.t -> Bytes.t option Lwt.t - - val store : global_state -> Protocol.t -> Protocol_hash.t option Lwt.t - - (** Remove a value from the local database. *) - val remove : global_state -> Protocol_hash.t -> bool Lwt.t - - val list : global_state -> Protocol_hash.Set.t Lwt.t - - val watcher : - global_state -> Protocol_hash.t Lwt_stream.t * Lwt_watcher.stopper -end - -module Current_mempool : sig - (** The current mempool. *) - val get : Chain.t -> (Block_header.t * Mempool.t) Lwt.t - - (** Set the current mempool. It is ignored if the current head is - not the provided one. *) - val set : Chain.t -> head:Block_hash.t -> Mempool.t -> unit Lwt.t -end - -type error += - | Incorrect_legacy_history_mode_switch of { - previous_mode : History_mode.Legacy.t; - next_mode : History_mode.Legacy.t; - } - -val history_mode : global_state -> History_mode.Legacy.t Lwt.t - -(** [compute_locator chain ?max_size block seed] computes a - locator of the [chain] from [head] to the chain's caboose or until - the locator contains [max_size] steps. - [max_size] defaults to 200. 
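    For instance (an illustrative sketch; [chain], [head] and [seed] are
    assumed bindings, and [send_locator] is a hypothetical consumer of the
    result):
    {[
      compute_locator chain ~max_size:100 head seed >>= fun locator ->
      send_locator locator
    ]}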
*) -val compute_locator : - Chain.t -> - ?max_size:int -> - Block.t -> - Block_locator.seed -> - Block_locator.t Lwt.t - -(** [compute_protocol_locator chain ?max_size ~proto_level seed] - computes a locator for a specific protocol of level [proto_level] - in the [chain] from the latest block with this protocol to its - activation block or until the locator contains [max_size] steps. - [max_size] defaults to 200. *) -val compute_protocol_locator : - Chain.t -> - ?max_size:int -> - proto_level:int -> - Block_locator.seed -> - Block_locator.t option Lwt.t - -(** Read the internal state of the node and initialize - the databases. *) -val init : - ?patch_context:(Context.t -> Context.t tzresult Lwt.t) -> - ?commit_genesis:(chain_id:Chain_id.t -> Context_hash.t tzresult Lwt.t) -> - ?store_mapsize:int64 -> - store_root:string -> - context_root:string -> - ?history_mode:History_mode.Legacy.t -> - ?readonly:bool -> - Genesis.t -> - (global_state * Chain.t * Context.index * History_mode.Legacy.t) tzresult - Lwt.t - -val close : global_state -> unit Lwt.t diff --git a/src/lib_store/legacy_store/legacy_store.ml b/src/lib_store/legacy_store/legacy_store.ml deleted file mode 100644 index 1a506268e543..000000000000 --- a/src/lib_store/legacy_store/legacy_store.ml +++ /dev/null @@ -1,469 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* Copyright (c) 2020 Metastate AG *) -(* Copyright (c) 2020 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -type t = Raw_store.t - -type global_store = t - -(************************************************************************** - * Configuration setup we need to save in order to avoid wrong changes. 
- **************************************************************************) - -module Configuration = struct - module History_mode = - Store_helpers.Make_single_store - (Raw_store) - (struct - let name = ["history_mode"] - end) - (Store_helpers.Make_value (History_mode.Legacy)) -end - -(************************************************************************** - * Net store under "chain/" - **************************************************************************) - -module Chain = struct - type store = global_store * Chain_id.t - - let get s id = (s, id) - - module Indexed_store = - Store_helpers.Make_indexed_substore - (Store_helpers.Make_substore - (Raw_store) - (struct - let name = ["chain"] - end)) - (Chain_id) - - let destroy = Indexed_store.remove_all - - let list t = - Indexed_store.fold_indexes t ~init:[] ~f:(fun h acc -> - Lwt.return (h :: acc)) - - module Genesis_hash = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["genesis"; "hash"] - end) - (Store_helpers.Make_value (Block_hash)) - - module Genesis_time = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["genesis"; "time"] - end) - (Store_helpers.Make_value (Time.Protocol)) - - module Genesis_protocol = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["genesis"; "protocol"] - end) - (Store_helpers.Make_value (Protocol_hash)) - - module Genesis_test_protocol = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["genesis"; "test_protocol"] - end) - (Store_helpers.Make_value (Protocol_hash)) - - module Expiration = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["expiration"] - end) - (Store_helpers.Make_value (Time.Protocol)) - - module Allow_forked_chain = Indexed_store.Make_set (struct - let name = ["allow_forked_chain"] - end) - - module Protocol_index = - Store_helpers.Make_indexed_substore - (Store_helpers.Make_substore - (Indexed_store.Store) - (struct - let name = ["protocol"] - end)) - (Store_helpers.Integer_index) - - module Protocol_info = - Protocol_index.Make_map - (struct - let name = ["info"] - end) - (Store_helpers.Make_value (struct - type t = Protocol_hash.t * Int32.t - - let encoding = - let open Data_encoding in - tup2 Protocol_hash.encoding int32 - end)) -end - -(************************************************************************** - * Temporary test chain forking block store under "forking_block_hash/" - **************************************************************************) - -module Forking_block_hash = - Store_helpers.Make_map - (Store_helpers.Make_substore - (Raw_store) - (struct - let name = ["forking_block_hash"] - end)) - (Chain_id) - (Store_helpers.Make_value (Block_hash)) - -(************************************************************************** - * Block_header store under "chain//blocks/" - **************************************************************************) - -module Block = struct - type store = Chain.store - - let get x = x - - module Indexed_store = - Store_helpers.Make_indexed_substore - (Store_helpers.Make_substore - (Chain.Indexed_store.Store) - (struct - let name = ["blocks"] - end)) - (Block_hash) - - type contents = { - header : Block_header.t; - message : string option; - max_operations_ttl : int; - last_allowed_fork_level : Int32.t; - context : Context_hash.t; - metadata : Bytes.t; - } - - module Contents = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["contents"] - 
end) - (Store_helpers.Make_value (struct - type t = contents - - let encoding = - let open Data_encoding in - conv - (fun { - header; - message; - max_operations_ttl; - last_allowed_fork_level; - context; - metadata; - } -> - ( message, - max_operations_ttl, - last_allowed_fork_level, - context, - metadata, - header )) - (fun ( message, - max_operations_ttl, - last_allowed_fork_level, - context, - metadata, - header ) -> - { - header; - message; - max_operations_ttl; - last_allowed_fork_level; - context; - metadata; - }) - (obj6 - (opt "message" string) - (req "max_operations_ttl" uint16) - (req "last_allowed_fork_level" int32) - (req "context" Context_hash.encoding) - (req "metadata" bytes) - (req "header" Block_header.encoding)) - end)) - - type pruned_contents = {header : Block_header.t} - - module Pruned_contents = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["pruned_contents"] - end) - (Store_helpers.Make_value (struct - type t = pruned_contents - - let encoding = - let open Data_encoding in - conv - (fun {header} -> header) - (fun header -> {header}) - (obj1 (req "header" Block_header.encoding)) - end)) - - module Block_metadata_hash = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["block_metadata_hash"] - end) - (Store_helpers.Make_value (Block_metadata_hash)) - - module Operations_index = - Store_helpers.Make_indexed_substore - (Store_helpers.Make_substore - (Indexed_store.Store) - (struct - let name = ["operations"] - end)) - (Store_helpers.Integer_index) - - module Operation_hashes = - Operations_index.Make_map - (struct - let name = ["hashes"] - end) - (Store_helpers.Make_value (struct - type t = Operation_hash.t list - - let encoding = Data_encoding.list Operation_hash.encoding - end)) - - module Operations = - Operations_index.Make_map - (struct - let name = ["contents"] - end) - (Store_helpers.Make_value (struct - type t = Operation.t list - - let encoding = Data_encoding.(list (dynamic_size Operation.encoding)) - end)) - - module Operations_metadata = - Operations_index.Make_map - (struct - let name = ["metadata"] - end) - (Store_helpers.Make_value (struct - type t = Bytes.t list - - let encoding = Data_encoding.(list bytes) - end)) - - module Operations_metadata_hashes = - Operations_index.Make_map - (struct - let name = ["operations_metadata_hashes"] - end) - (Store_helpers.Make_value (struct - type t = Operation_metadata_hash.t list - - let encoding = Data_encoding.(list Operation_metadata_hash.encoding) - end)) - - type invalid_block = {level : int32; errors : Error_monad.error list} - - module Invalid_block = - Store_helpers.Make_map - (Store_helpers.Make_substore - (Chain.Indexed_store.Store) - (struct - let name = ["invalid_blocks"] - end)) - (Block_hash) - (Store_helpers.Make_value (struct - type t = invalid_block - - let encoding = - let open Data_encoding in - conv - (fun {level; errors} -> (level, errors)) - (fun (level, errors) -> {level; errors}) - (tup2 int32 (list Error_monad.error_encoding)) - end)) - - let register s = - Base58.register_resolver Block_hash.b58check_encoding (fun str -> - let pstr = Block_hash.prefix_path str in - Chain.Indexed_store.fold_indexes s ~init:[] ~f:(fun chain acc -> - Indexed_store.resolve_index (s, chain) pstr >>= fun l -> - Lwt.return (List.rev_append l acc))) - - module Predecessors = - Store_helpers.Make_map - (Store_helpers.Make_substore - (Indexed_store.Store) - (struct - let name = ["predecessors"] - end)) - (Store_helpers.Integer_index) - 
(Store_helpers.Make_value (Block_hash)) -end - -(************************************************************************** - * Blockchain data - **************************************************************************) - -module Chain_data = struct - type store = Chain.store - - let get s = s - - module Known_heads = - Store_helpers.Make_buffered_set - (Store_helpers.Make_substore - (Chain.Indexed_store.Store) - (struct - let name = ["known_heads"] - end)) - (Block_hash) - (Block_hash.Set) - - module Current_head = - Store_helpers.Make_single_store - (Chain.Indexed_store.Store) - (struct - let name = ["current_head"] - end) - (Store_helpers.Make_value (Block_hash)) - - module In_main_branch = - Store_helpers.Make_single_store - (Block.Indexed_store.Store) - (struct - let name = ["in_chain"] - end) - (Store_helpers.Make_value (Block_hash)) - - (* successor *) - - module Checkpoint = - Store_helpers.Make_single_store - (Chain.Indexed_store.Store) - (struct - let name = ["checkpoint"] - end) - (Store_helpers.Make_value (Block_header)) - - module Save_point = - Store_helpers.Make_single_store - (Chain.Indexed_store.Store) - (struct - let name = ["save_point"] - end) - (Store_helpers.Make_value (struct - type t = Int32.t * Block_hash.t - - let encoding = - let open Data_encoding in - tup2 int32 Block_hash.encoding - end)) - - module Caboose = - Store_helpers.Make_single_store - (Chain.Indexed_store.Store) - (struct - let name = ["caboose"] - end) - (Store_helpers.Make_value (struct - type t = Int32.t * Block_hash.t - - let encoding = - let open Data_encoding in - tup2 int32 Block_hash.encoding - end)) -end - -(************************************************************************** - * Protocol store under "protocols/" - **************************************************************************) - -module Protocol = struct - type store = global_store - - let get x = x - - module Indexed_store = - Store_helpers.Make_indexed_substore - (Store_helpers.Make_substore - (Raw_store) - (struct - let name = ["protocols"] - end)) - (Protocol_hash) - - module Contents = - Indexed_store.Make_map - (struct - let name = ["contents"] - end) - (Store_helpers.Make_value (Protocol)) - - module RawContents = - Store_helpers.Make_single_store - (Indexed_store.Store) - (struct - let name = ["contents"] - end) - (Store_helpers.Raw_value) - - let register s = - Base58.register_resolver Protocol_hash.b58check_encoding (fun str -> - let pstr = Protocol_hash.prefix_path str in - Indexed_store.resolve_index s pstr) -end - -let init ?readonly ?mapsize dir = - Raw_store.init ?readonly ?mapsize dir >>=? fun s -> - Block.register s ; - Protocol.register s ; - return s - -let close = Raw_store.close - -let open_with_atomic_rw = Raw_store.open_with_atomic_rw - -let with_atomic_rw = Raw_store.with_atomic_rw diff --git a/src/lib_store/legacy_store/legacy_store.mli b/src/lib_store/legacy_store/legacy_store.mli deleted file mode 100644 index 010402eafc8e..000000000000 --- a/src/lib_store/legacy_store/legacy_store.mli +++ /dev/null @@ -1,237 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. 
*) -(* Copyright (c) 2020 Metastate AG *) -(* Copyright (c) 2020 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -(** Copy of legacy src/lib_shell/store.mli *) - -open Store_sigs - -type t - -type global_store = t - -(** [init ~mapsize path] returns an initialized store at [path] of - maximum capacity [mapsize] bytes. *) -val init : ?readonly:bool -> ?mapsize:int64 -> string -> t tzresult Lwt.t - -val close : t -> unit - -val open_with_atomic_rw : - ?mapsize:int64 -> - string -> - (t -> 'a Error_monad.tzresult Lwt.t) -> - 'a tzresult Lwt.t - -val with_atomic_rw : t -> (unit -> 'a Lwt.t) -> 'a Lwt.t - -(** {2 Configuration} *) - -module Configuration : sig - module History_mode : - SINGLE_STORE - with type t := global_store - and type value := History_mode.Legacy.t -end - -(** {2 Chain store} *) - -module Chain : sig - val list : global_store -> Chain_id.t list Lwt.t - - val destroy : global_store -> Chain_id.t -> unit Lwt.t - - type store - - val get : global_store -> Chain_id.t -> store - - module Genesis_hash : - SINGLE_STORE with type t := store and type value := Block_hash.t - - module Genesis_time : - SINGLE_STORE with type t := store and type value := Time.Protocol.t - - module Genesis_protocol : - SINGLE_STORE with type t := store and type value := Protocol_hash.t - - module Genesis_test_protocol : - SINGLE_STORE with type t := store and type value := Protocol_hash.t - - module Expiration : - SINGLE_STORE with type t := store and type value := Time.Protocol.t - - module Allow_forked_chain : - SET_STORE with type t := t and type elt := Chain_id.t - - module Protocol_info : - MAP_STORE - with type t = store - and type key = int - and type value = Protocol_hash.t * Int32.t -end - -(** {2 Mutable chain data} *) - -module Chain_data : sig - type store - - val get : Chain.store -> store - - module Current_head : - SINGLE_STORE with type t := store and type value := Block_hash.t - - module Known_heads : - BUFFERED_SET_STORE - with type t := store - and type elt := Block_hash.t - and module Set := Block_hash.Set - - module In_main_branch : - SINGLE_STORE - with type t = store * Block_hash.t - and type value := Block_hash.t - - (* successor *) - - module Checkpoint : - SINGLE_STORE with type t := store and type value := Block_header.t - - module Save_point : - SINGLE_STORE with type t := store and type value := 
Int32.t * Block_hash.t - - module Caboose : - SINGLE_STORE with type t := store and type value := Int32.t * Block_hash.t -end - -(** {2 Block header store} *) - -module Block : sig - type store = Chain.store - - val get : Chain.store -> store - - type contents = { - header : Block_header.t; - message : string option; - max_operations_ttl : int; - last_allowed_fork_level : Int32.t; - context : Context_hash.t; - metadata : Bytes.t; - } - - module Contents : - SINGLE_STORE with type t = store * Block_hash.t and type value := contents - - (** Block header storage used for pruned blocks. - Blocks that are not pruned have their header - stored in their contents (see {!Store.Block.Contents}). - For an abstraction over a block header, please see - the {!State.Block.Header} module. - *) - - type pruned_contents = {header : Block_header.t} - - module Pruned_contents : - SINGLE_STORE - with type t = store * Block_hash.t - and type value := pruned_contents - - (* The hash of block header metadata, only set on blocks starting from - environment V1. *) - module Block_metadata_hash : - SINGLE_STORE - with type t = store * Block_hash.t - and type value := Block_metadata_hash.t - - module Operation_hashes : - MAP_STORE - with type t = store * Block_hash.t - and type key = int - and type value = Operation_hash.t list - - module Operations : - MAP_STORE - with type t = store * Block_hash.t - and type key = int - and type value = Operation.t list - - module Operations_metadata : - MAP_STORE - with type t = store * Block_hash.t - and type key = int - and type value = Bytes.t list - - (* The hashes of operations metadata, only set on blocks starting from - environment V1. *) - module Operations_metadata_hashes : - MAP_STORE - with type t = store * Block_hash.t - and type key = int - and type value = Operation_metadata_hash.t list - - type invalid_block = {level : int32; errors : Error_monad.error list} - - module Invalid_block : - MAP_STORE - with type t = store - and type key = Block_hash.t - and type value = invalid_block - - (** - Block predecessors under - [/blocks//predecessors//]. - Used to compute block predecessors in [lib_node_shell/state.ml]. 
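      For example, reading the [i]-th stored predecessor entry of a block
      (an illustrative sketch; [block_store], [hash] and [i] are assumed
      bindings):
      {[
        Predecessors.read_opt (block_store, hash) i >>= function
        | Some pred_hash -> Lwt.return_some pred_hash
        | None -> Lwt.return_none
      ]}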
- *) - module Predecessors : - MAP_STORE - with type t = store * Block_hash.t - and type key = int - and type value = Block_hash.t -end - -(** {2 Protocol store} *) - -module Protocol : sig - type store = global_store - - val get : global_store -> store - - module Contents : - MAP_STORE - with type t := store - and type key := Protocol_hash.t - and type value := Protocol.t - - module RawContents : - SINGLE_STORE with type t = store * Protocol_hash.t and type value := Bytes.t -end - -(** {2 Temporary test chain forking block store} *) - -module Forking_block_hash : - MAP_STORE - with type t = global_store - and type key := Chain_id.t - and type value := Block_hash.t diff --git a/src/lib_store/legacy_store/legacy_store_builder.ml b/src/lib_store/legacy_store/legacy_store_builder.ml deleted file mode 100644 index f4df090e9eed..000000000000 --- a/src/lib_store/legacy_store/legacy_store_builder.ml +++ /dev/null @@ -1,349 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2020 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -open Filename.Infix - -(* Inlined from Tezos_shell.Patch_context to avoid cyclic dependencies *) -let patch_context (genesis : Genesis.t) key_json ctxt = - (match key_json with - | None -> Lwt.return ctxt - | Some (key, json) -> - Tezos_context.Context.add - ctxt - [key] - (Data_encoding.Binary.to_bytes_exn Data_encoding.json json)) - >>= fun ctxt -> - Registered_protocol.get_result genesis.protocol >>=? fun proto -> - let module Proto = (val proto) in - let ctxt = Tezos_shell_context.Shell_context.wrap_disk_context ctxt in - Proto.init - ctxt - { - level = 0l; - proto_level = 0; - predecessor = genesis.block; - timestamp = genesis.time; - validation_passes = 0; - operations_hash = Operation_list_list_hash.empty; - fitness = []; - context = Context_hash.zero; - } - >>=? 
fun {context; _} -> - return (Tezos_shell_context.Shell_context.unwrap_disk_context context) - -let load_protocol proto protocol_root = - if Registered_protocol.mem proto then return_unit - else - let cmxs_file = - protocol_root - // Protocol_hash.to_short_b58check proto - // Format.asprintf "protocol_%a" Protocol_hash.pp proto - in - try - Dynlink.loadfile_private (cmxs_file ^ ".cmxs") ; - return_unit - with Dynlink.Error err -> - Format.ksprintf - (fun msg -> - fail - Block_validator_errors.( - Validation_process_failed (Protocol_dynlink_failure msg))) - "Cannot load file: %s. (Expected location: %s.)" - (Dynlink.error_message err) - cmxs_file - -(* From "legacy chain_validator"*) -let may_update_checkpoint chain_state new_head = - Legacy_state.Chain.checkpoint chain_state >>= fun checkpoint -> - Legacy_state.Block.last_allowed_fork_level new_head >>=? fun new_level -> - if new_level <= checkpoint.shell.level then return_unit - else - let state = Legacy_state.Chain.global_state chain_state in - Legacy_state.history_mode state >>= fun history_mode -> - let head_level = Legacy_state.Block.level new_head in - Legacy_state.Block.predecessor_n - new_head - (Int32.to_int (Int32.sub head_level new_level)) - >>= function - | None -> assert false (* should not happen *) - | Some new_checkpoint -> ( - Legacy_state.Block.read_opt chain_state new_checkpoint >>= function - | None -> assert false (* should not happen *) - | Some new_checkpoint -> ( - let new_checkpoint = Legacy_state.Block.header new_checkpoint in - match history_mode with - | History_mode.Legacy.Archive -> - Legacy_state.Chain.set_checkpoint chain_state new_checkpoint - >>= fun () -> return_unit - | Full -> - Legacy_state.Chain.set_checkpoint_then_purge_full - chain_state - new_checkpoint - | Rolling -> - Legacy_state.Chain.set_checkpoint_then_purge_rolling - chain_state - new_checkpoint)) - -let generate identity_file pow = - let target = Crypto_box.make_pow_target pow in - Format.eprintf "Generating a new identity... (level: %.2f) " pow ; - P2p_identity.generate_with_bound target >>= fun id -> - Lwt_utils_unix.Json.write_file - identity_file - (Data_encoding.Json.construct P2p_identity.encoding id) - >>=? fun () -> return_unit - -let dump_config data_dir = - (* version *) - let data_version = "0.0.4" in - let version_file_name = "version.json" in - let version_file data_dir = data_dir // version_file_name in - let write_version_file data_dir = - let version_file = version_file data_dir in - let version_encoding = Data_encoding.(obj1 (req "version" string)) in - Lwt_utils_unix.Json.write_file - version_file - (Data_encoding.Json.construct version_encoding data_version) - in - write_version_file data_dir >>=? fun () -> - (* identity *) - let identity_file_name = "identity.json" in - let identity_file = data_dir // identity_file_name in - generate identity_file 0. >>=? fun () -> - (* config *) - (* TODO write a config ? *) - let _config_file_name = "config.json" in - (* TODO write some peers ? *) - let _peers_file_name = "peers.json" in - (* TODO protocols ? 
*) - let _protocol_dir data_dir = data_dir // "protocol" in - return_unit - -let run () = - let ok msg = - External_validation.send Lwt_io.stdout Data_encoding.string (msg ^ "\n") - >>= fun () -> Lwt_io.flush_all () >>= return - in - Error_monad.protect - (fun () -> - let usg_msg = Format.sprintf "Usage: %s " Sys.argv.(0) in - let output_dir = ref None in - Arg.parse - [] - (fun dir -> - if !output_dir <> None then raise (Arg.Bad usg_msg) - else output_dir := Some dir) - usg_msg ; - let output_dir = - WithExceptions.Option.to_exn - ~none:(Invalid_argument usg_msg) - !output_dir - in - if Sys.file_exists output_dir then - Format.ksprintf Stdlib.failwith "%s already exists" output_dir ; - Lwt_utils_unix.create_dir output_dir >>= fun () -> - dump_config output_dir >>=? fun () -> - let store_root = Filename.concat output_dir "store" in - let context_root = Filename.concat output_dir "context" in - (* Start listening for messages on stdin *) - External_validation.recv - Lwt_io.stdin - Data_encoding.( - tup2 - History_mode.Legacy.encoding - External_validation.parameters_encoding) - >>= fun ( history_mode, - { - External_validation.context_root = _; - protocol_root; - sandbox_parameters; - genesis; - user_activated_upgrades; - user_activated_protocol_overrides; - } ) -> - let sandbox_param = - Option.map (fun p -> ("sandbox_parameter", p)) sandbox_parameters - in - let patch_context ctxt = patch_context genesis sandbox_param ctxt in - (* TODO parametrize this *) - Legacy_state.init - ~patch_context - ~store_root - ~context_root - ~history_mode - ~readonly:false - genesis - >>=? fun (state, chain, context_index, _history_mode) -> - (* Storing protocols *) - Seq.iter_es - (fun (module P : Registered_protocol.T) -> - let proto = - Registered_protocol.get_embedded_sources P.hash - |> WithExceptions.Option.get ~loc:__LOC__ - in - Legacy_state.Protocol.store state proto >>= function - | None -> - Format.kasprintf - ok - "could not store protocol %a" - Protocol_hash.pp - P.hash - | Some p -> - Format.kasprintf ok "stored protocol %a" Protocol_hash.pp p) - (Registered_protocol.seq ()) - >>=? fun () -> - let rec loop () = - External_validation.recv - Lwt_io.stdin - External_validation.request_encoding - >>= function - | External_validation.Fork_test_chain _ (* TODO *) - | External_validation.Init | External_validation.Commit_genesis _ - (* commit_genesis is done by [Legacy_state.init] *) - | External_validation.Terminate -> - ok "exiting" >>=? fun () -> exit 0 - | External_validation.Reconfigure_event_logging _ -> - (* noop *) ok "noop" - | External_validation.Validate - { - chain_id; - block_header; - predecessor_block_header; - operations; - max_operations_ttl; - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - } -> - let pred_context_hash = predecessor_block_header.shell.context in - ( (Context.checkout context_index pred_context_hash >>= function - | Some context -> return context - | None -> - fail - (Block_validator_errors.Failed_to_checkout_context - pred_context_hash)) - >>=? fun predecessor_context -> - Context.get_protocol predecessor_context >>= fun protocol_hash -> - load_protocol protocol_hash protocol_root >>=? 
fun () -> - (* This call commits in the context *) - let apply_environment = - { - Block_validation.max_operations_ttl; - chain_id; - predecessor_block_header; - predecessor_context; - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - user_activated_upgrades; - user_activated_protocol_overrides; - } - in - Block_validation.apply - apply_environment - block_header - operations - ~cache:`Lazy - >>= function - | Error - [Block_validator_errors.Unavailable_protocol {protocol; _}] as - err -> ( - (* If `next_protocol` is missing, try to load it *) - load_protocol protocol protocol_root - >>= function - | Error _ -> Lwt.return err - | Ok () -> - Block_validation.apply - apply_environment - block_header - operations - ~cache:`Lazy) - | result -> Lwt.return result ) - >>=? fun {result; _} -> - let { - Block_validation.validation_store; - block_metadata; - ops_metadata; - block_metadata_hash; - ops_metadata_hashes; - } = - result - in - (Context.checkout context_index validation_store.context_hash - >>= function - | Some context -> return context - | None -> - fail - (Block_validator_errors.Failed_to_checkout_context - validation_store.context_hash)) - >>=? fun commited_context -> - Context.get_protocol commited_context >>= fun protocol_hash -> - Legacy_state.Chain.update_level_indexed_protocol_store - chain - chain_id - block_header.shell.proto_level - protocol_hash - block_header - >>= fun () -> - Legacy_state.Block.store - chain - block_header - block_metadata - operations - ops_metadata - block_metadata_hash - ops_metadata_hashes - validation_store - ~forking_testchain:false - >>=? fun block -> - let block = - WithExceptions.Option.to_exn - ~none:(Failure "failed to store") - block - in - Legacy_chain.set_head chain block >>=? fun _prev_head -> - may_update_checkpoint chain block >>=? fun () -> - let msg = - Data_encoding.Json.( - construct Block_validation.result_encoding result |> to_string) - in - let block_hash = Block_header.hash block_header in - Format.kasprintf - ok - "validated and stored %a: %s" - Block_hash.pp - block_hash - msg - >>=? fun () -> loop () - | _ -> assert false - in - loop ()) - ~on_error:(fun err -> - Format.kasprintf ok "error: %a" pp_print_trace err >>=? fun () -> exit 1) - -let () = - match Lwt_main.run (run ()) with - | Ok () -> () - | Error err -> - Format.eprintf "%a@." pp_print_trace err ; - exit 1 diff --git a/src/lib_store/legacy_store/raw_store.ml b/src/lib_store/legacy_store/raw_store.ml deleted file mode 100644 index 9b402eed32a8..000000000000 --- a/src/lib_store/legacy_store/raw_store.ml +++ /dev/null @@ -1,314 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. 
*) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -open Rresult - -type t = { - dir : Lmdb.t; - parent : (Lmdb.rw Lmdb.txn * Lmdb.db * Lmdb.rw Lmdb.cursor) Lwt.key; -} - -type key = string list - -type value = Bytes.t - -type error += Unknown of string list - -let () = - Error_monad.register_error_kind - `Permanent - ~id:"raw_store.unknown" - ~title:"Missing key in store" - ~description:"Missing key in store" - ~pp:(fun ppf keys -> - Format.fprintf ppf "Missing key in store: %s" (String.concat "/" keys)) - Data_encoding.(obj1 (req "key" (list string))) - (function Unknown keys -> Some keys | _ -> None) - (fun keys -> Unknown keys) - -let concat = String.concat "/" - -let split = String.split_on_char '/' - -let lwt_fail_error err = Lwt.fail_with (Lmdb.string_of_error err) - -let of_result = function - | Ok res -> Lwt.return res - | Error err -> lwt_fail_error err - -let ( >>=? ) v f = match v with Error err -> lwt_fail_error err | Ok v -> f v - -let init ?(readonly = false) ?mapsize path = - if not (Sys.file_exists path) then Unix.mkdir path 0o755 ; - let sync_flag = - match Sys.getenv_opt "TEZOS_STORE_SYNC" with - | None -> [] - | Some s -> ( - match String.lowercase_ascii s with - | "nosync" -> [Lmdb.NoSync] - | "nometasync" -> [Lmdb.NoMetaSync] - | _ -> - Printf.eprintf - "Unrecognized TEZOS_STORE_SYNC option : %s\n\ - allowed: nosync nometasync" - s ; - []) - in - let readonly_flag = if readonly then [Lmdb.RdOnly] else [] in - let file_flags = if readonly then 0o444 else 0o644 in - match - Lmdb.opendir - ?mapsize - ~flags:(sync_flag @ readonly_flag @ [NoTLS; NoMetaSync]) - path - file_flags - with - | Ok dir -> return {dir; parent = Lwt.new_key ()} - | Error err -> failwith "%a" Lmdb.pp_error err - -let close {dir; _} = Lmdb.closedir dir - -let known {dir; parent} key = - (match Lwt.get parent with - | Some (txn, db, _cursor) -> Lmdb.mem txn db (concat key) - | None -> Lmdb.with_ro_db dir ~f:(fun txn db -> Lmdb.mem txn db (concat key))) - |> of_result - -let read_opt {dir; parent} key = - (match Lwt.get parent with - | Some (txn, db, _cursor) -> - Lmdb.get txn db (concat key) >>| Bigstring.to_bytes - | None -> - Lmdb.with_ro_db dir ~f:(fun txn db -> - Lmdb.get txn db (concat key) >>| Bigstring.to_bytes)) - |> function - | Ok v -> Lwt.return_some v - | Error KeyNotFound -> Lwt.return_none - | Error err -> lwt_fail_error err - -let read {dir; parent} key = - (match Lwt.get parent with - | Some (txn, db, _cursor) -> - Lmdb.get txn db (concat key) >>| Bigstring.to_bytes - | None -> - Lmdb.with_ro_db dir ~f:(fun txn db -> - Lmdb.get txn db (concat key) >>| Bigstring.to_bytes)) - |> function - | Ok v -> return v - | Error _err -> fail (Unknown key) - -let store {dir; parent} k v = - let v = Bigstring.of_bytes v in - (match Lwt.get parent with - | Some (txn, db, _cursor) -> Lmdb.put txn db (concat k) v - | None -> Lmdb.with_rw_db dir ~f:(fun txn db -> Lmdb.put txn db (concat k) v)) - |> of_result - -let remove {dir; parent} k = - let remove txn 
db = - match Lmdb.del txn db (concat k) with - | Ok () -> Ok () - | Error KeyNotFound -> Ok () - | Error err -> Error err - in - (match Lwt.get parent with - | Some (txn, db, _cursor) -> remove txn db - | None -> Lmdb.with_rw_db dir ~f:remove) - |> of_result - -let is_prefix s s' = - String.(length s <= length s' && compare s (sub s' 0 (length s)) = 0) - -let known_dir {dir; parent} k = - let k = concat k in - let cursor_fun cursor = - Lmdb.cursor_at cursor k >>= fun () -> - Lmdb.cursor_get cursor >>| fun (first_k, _v) -> - is_prefix k (Bigstring.to_string first_k) - in - (match Lwt.get parent with - | Some (txn, db, _cursor) -> Lmdb.with_cursor txn db ~f:cursor_fun - | None -> - Lmdb.with_ro_db dir ~f:(fun txn db -> - Lmdb.with_cursor txn db ~f:cursor_fun)) - |> of_result - -let remove_dir {dir; parent} k = - let k = concat k in - let cursor_fun cursor = - Lmdb.cursor_at cursor k >>= fun () -> - Lmdb.cursor_iter cursor ~f:(fun (kk, _v) -> - let kk_string = Bigstring.to_string kk in - if is_prefix k kk_string then Lmdb.cursor_del cursor - else Error KeyNotFound) - in - (match Lwt.get parent with - | Some (txn, db, _cursor) -> Lmdb.with_cursor txn db ~f:cursor_fun - | None -> - Lmdb.with_rw_db dir ~f:(fun txn db -> - Lmdb.with_cursor txn db ~f:cursor_fun)) - |> function - | Error KeyNotFound | Ok () -> Lwt.return_unit - | Error err -> lwt_fail_error err - -let list_equal l1 l2 len = - if - len < 0 - || Compare.List_length_with.(l1 < len) - || Compare.List_length_with.(l2 < len) - then invalid_arg "list_compare: invalid len" ; - let rec inner l1 l2 len = - match (len, l1, l2) with - | (0, _, _) -> true - | (_, [], _) | (_, _, []) -> false - | (_, h1 :: t1, h2 :: t2) -> - if h1 <> h2 then false else inner t1 t2 (pred len) - in - inner l1 l2 len - -let is_child ~parent ~child = - let plen = List.length parent in - let clen = List.length child in - clen > plen && list_equal parent child plen - -let list_sub l pos len = - if len < 0 || pos < 0 || Compare.List_length_with.(l < pos + len) then - invalid_arg "list_sub" ; - let rec inner (acc, n) = function - | [] -> List.rev acc - | h :: t -> if n = 0 then List.rev acc else inner (h :: acc, pred n) t - in - inner ([], len) l - -let with_rw_cursor_lwt ?nosync ?nometasync ?flags ?name {dir; parent} ~f = - let local_parent = - match Lwt.get parent with - | None -> None - | Some (txn, _db, _cursor) -> Some txn - in - Lmdb.create_rw_txn ?nosync ?nometasync ?parent:local_parent dir - >>=? fun txn -> - Lmdb.opendb ?flags ?name txn >>=? fun db -> - Lmdb.opencursor txn db >>=? fun cursor -> - Lwt.with_value - parent - (Some (txn, db, cursor)) - (fun () -> - Lwt.try_bind - (fun () -> f cursor) - (fun res -> - Lmdb.cursor_close cursor ; - Lmdb.commit_txn txn >>=? 
fun () -> Lwt.return res) - (fun exn -> - Lmdb.cursor_close cursor ; - Lmdb.abort_txn txn ; - Lwt.fail exn)) - -let cursor_next_lwt cursor acc f = - match Lmdb.cursor_next cursor with - | Error KeyNotFound -> acc - | Error err -> lwt_fail_error err - | Ok () -> Lwt.bind acc f - -let cursor_at_lwt cursor k acc f = - match Lmdb.cursor_at cursor (concat k) with - | Error KeyNotFound -> acc - | Error err -> lwt_fail_error err - | Ok () -> Lwt.bind acc f - -(* assumption: store path segments have only characters different than - the separator '/', which immediately precedes '0' *) -let zero_char_str = String.make 1 (Char.chr (Char.code '/' + 1)) - -let next_key_after_subdirs l = - match List.rev l with - | [] -> [zero_char_str] - | last :: firsts -> List.rev @@ (last ^ zero_char_str) :: firsts - -module Hashtbl = Hashtbl.MakeSeeded (struct - type t = string list - - let hash = Hashtbl.seeded_hash - - let equal (d1 : string list) d2 = d1 = d2 -end) - -type key_or_dir = [`Key of key | `Dir of key] - -let fold t k ~init ~f = - let base_len = List.length k in - let rec inner ht cursor acc = - Lmdb.cursor_get cursor >>=? fun (kk, _v) -> - let kk = Bigstring.to_string kk in - let kk_split = split kk in - match is_child ~child:kk_split ~parent:k with - | false -> Lwt.return acc - | true -> - let cur_len = List.length kk_split in - if cur_len = succ base_len then - cursor_next_lwt cursor (f (`Key kk_split) acc) (inner ht cursor) - else - let dir = list_sub kk_split 0 (succ base_len) in - if Hashtbl.mem ht dir then - cursor_at_lwt - cursor - (next_key_after_subdirs dir) - (Lwt.return acc) - (inner ht cursor) - else ( - Hashtbl.add ht dir () ; - cursor_next_lwt cursor (f (`Dir dir) acc) (inner ht cursor)) - in - with_rw_cursor_lwt t ~f:(fun cursor -> - cursor_at_lwt cursor k (Lwt.return init) (fun acc -> - let ht = Hashtbl.create 31 in - inner ht cursor acc)) - -let fold_keys t k ~init ~f = - with_rw_cursor_lwt t ~f:(fun cursor -> - cursor_at_lwt - cursor - k - (Lwt.return init) - (let rec inner acc = - Lmdb.cursor_get cursor >>=? fun (kk, _v) -> - let kk = Bigstring.to_string kk in - let kk_split = split kk in - match is_child ~child:kk_split ~parent:k with - | false -> Lwt.return acc - | true -> cursor_next_lwt cursor (f kk_split acc) inner - in - inner)) - -let keys t = fold_keys t ~init:[] ~f:(fun k acc -> Lwt.return (k :: acc)) - -let open_with_atomic_rw ?mapsize path f = - let open Error_monad in - init ?mapsize path >>=? fun state -> - with_rw_cursor_lwt state ~f:(fun _c -> f state) >>=? fun res -> - close state ; - return res - -let with_atomic_rw state f = with_rw_cursor_lwt state ~f:(fun _c -> f ()) diff --git a/src/lib_store/legacy_store/raw_store.mli b/src/lib_store/legacy_store/raw_store.mli deleted file mode 100644 index f2c3ad19a14b..000000000000 --- a/src/lib_store/legacy_store/raw_store.mli +++ /dev/null @@ -1,40 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. 
*) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -open Store_sigs - -include STORE - -val init : ?readonly:bool -> ?mapsize:int64 -> string -> t tzresult Lwt.t - -val close : t -> unit - -val with_atomic_rw : t -> (unit -> 'a Lwt.t) -> 'a Lwt.t - -val open_with_atomic_rw : - ?mapsize:int64 -> - string -> - (t -> 'a Error_monad.tzresult Lwt.t) -> - 'a tzresult Lwt.t diff --git a/src/lib_store/legacy_store/store_helpers.ml b/src/lib_store/legacy_store/store_helpers.ml deleted file mode 100644 index 345950009f84..000000000000 --- a/src/lib_store/legacy_store/store_helpers.ml +++ /dev/null @@ -1,470 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. 
*) -(* *) -(*****************************************************************************) - -module Events = struct - include Internal_event.Simple - - let section = ["db"] - - let serializing_error = - declare_1 - ~section - ~name:"serializing_error" - ~msg:"exception while serializing value {write_error}" - ~level:Error - ~pp1:Data_encoding.Binary.pp_write_error - ("write_error", Data_encoding.Binary.write_error_encoding) -end - -open Store_sigs - -module Make_value (V : ENCODED_VALUE) = struct - type t = V.t - - let of_bytes b = - match Data_encoding.Binary.of_bytes V.encoding b with - | Error re -> - error_with "Cannot parse data: %a" Data_encoding.Binary.pp_read_error re - | Ok v -> ok v - - let to_bytes v = - match Data_encoding.Binary.to_bytes V.encoding v with - | Ok b -> b - | Error we -> - (* this debug event is asyncronous, and we don't have lwt here *) - Events.(emit__dont_wait__use_with_care serializing_error we) ; - Bytes.create 0 -end - -module Raw_value = struct - type t = Bytes.t - - let of_bytes b = ok b - - let to_bytes b = b -end - -module Make_single_store (S : STORE) (N : NAME) (V : VALUE) = struct - type t = S.t - - type value = V.t - - let known t = S.known t N.name - - let read t = S.read t N.name >>=? fun b -> Lwt.return (V.of_bytes b) - - let read_opt t = read t >|= function Error _ -> None | Ok v -> Some v - - let store t v = S.store t N.name (V.to_bytes v) - - let remove t = S.remove t N.name -end - -let map_key f = function `Key k -> `Key (f k) | `Dir k -> `Dir (f k) - -module Make_substore (S : STORE) (N : NAME) : STORE with type t = S.t = struct - type t = S.t - - type key = string list - - type value = Bytes.t - - let name_length = List.length N.name - - let to_key k = N.name @ k - - let of_key k = List.remove name_length k - - let known t k = S.known t (to_key k) - - let known_dir t k = S.known_dir t (to_key k) - - let read t k = S.read t (to_key k) - - let read_opt t k = S.read_opt t (to_key k) - - let store t k v = S.store t (to_key k) v - - let remove t k = S.remove t (to_key k) - - type key_or_dir = [`Key of key | `Dir of key] - - let fold t k ~init ~f = - S.fold t (to_key k) ~init ~f:(fun k acc -> f (map_key of_key k) acc) - - let keys t k = S.keys t (to_key k) >|= fun keys -> List.map of_key keys - - let fold_keys t k ~init ~f = - S.fold_keys t (to_key k) ~init ~f:(fun k acc -> f (of_key k) acc) - - let remove_dir t k = S.remove_dir t (to_key k) -end - -module Make_indexed_substore (S : STORE) (I : INDEX) = struct - type t = S.t - - type key = I.t - - module Store = struct - type t = S.t * I.t - - type key = string list - - type value = Bytes.t - - let to_key i k = - assert (Compare.List_length_with.(I.to_path i [] = I.path_length)) ; - I.to_path i k - - let of_key k = List.remove I.path_length k - - let known (t, i) k = S.known t (to_key i k) - - let known_dir (t, i) k = S.known_dir t (to_key i k) - - let read (t, i) k = S.read t (to_key i k) - - let read_opt (t, i) k = S.read_opt t (to_key i k) - - let store (t, i) k v = S.store t (to_key i k) v - - let remove (t, i) k = S.remove t (to_key i k) - - type key_or_dir = [`Key of key | `Dir of key] - - let fold (t, i) k ~init ~f = - S.fold t (to_key i k) ~init ~f:(fun k acc -> f (map_key of_key k) acc) - - let keys (t, i) k = - S.keys t (to_key i k) >|= fun keys -> List.map of_key keys - - let fold_keys (t, i) k ~init ~f = - S.fold_keys t (to_key i k) ~init ~f:(fun k acc -> f (of_key k) acc) - - let remove_dir (t, i) k = S.remove_dir t (to_key i k) - end - - let remove_all t i = Store.remove_dir (t, i) 
[] - - let fold_indexes t ~init ~f = - let rec dig i path acc = - if i <= 0 then - match I.of_path path with - | None -> assert false - | Some path -> f path acc - else - S.fold t path ~init:acc ~f:(fun k acc -> - match k with - | `Dir k -> dig (i - 1) k acc - | `Key _ -> Lwt.return acc) - in - dig I.path_length [] init - - let indexes t = - fold_indexes t ~init:[] ~f:(fun i acc -> Lwt.return (i :: acc)) - - let list t k = S.fold t k ~init:[] ~f:(fun k acc -> Lwt.return (k :: acc)) - - let resolve_index t prefix = - let rec loop i prefix = function - | [] when i = I.path_length -> ( - match I.of_path prefix with - | None -> assert false - | Some path -> Lwt.return [path]) - | [] -> - list t prefix >>= fun prefixes -> - List.map_p - (function `Key prefix | `Dir prefix -> loop (i + 1) prefix []) - prefixes - >|= List.flatten - | [d] when i = I.path_length - 1 -> - if i >= I.path_length then invalid_arg "IO.resolve" ; - list t prefix >>= fun prefixes -> - List.map_p - (function - | `Key prefix | `Dir prefix -> - let open Option in - bind (List.last_opt prefix) (fun last_prefix -> - String.remove_prefix ~prefix:d last_prefix) - |> fold ~none:Lwt.return_nil ~some:(fun _ -> - loop (i + 1) prefix [])) - prefixes - >|= List.flatten - | "" :: ds -> - list t prefix >>= fun prefixes -> - List.map_p - (function `Key prefix | `Dir prefix -> loop (i + 1) prefix ds) - prefixes - >|= List.flatten - | d :: ds -> ( - if i >= I.path_length then invalid_arg "IO.resolve" ; - S.known_dir t (prefix @ [d]) >>= function - | true -> loop (i + 1) (prefix @ [d]) ds - | false -> Lwt.return_nil) - in - loop 0 [] prefix - - module Make_set (N : NAME) = struct - type t = S.t - - type elt = I.t - - let inited = Bytes.of_string "inited" - - let known s i = Store.known (s, i) N.name - - let store s i = Store.store (s, i) N.name inited - - let remove s i = Store.remove (s, i) N.name - - let remove_all s = fold_indexes s ~init:() ~f:(fun i () -> remove s i) - - let fold s ~init ~f = - fold_indexes s ~init ~f:(fun i acc -> - known s i >>= function true -> f i acc | false -> Lwt.return acc) - - let elements s = fold s ~init:[] ~f:(fun p acc -> Lwt.return (p :: acc)) - - let iter s ~f = fold s ~init:() ~f:(fun p () -> f p) - end - - module Make_buffered_set (N : NAME) (Set : Set.S with type elt = I.t) = struct - include Make_set (N) - module Set = Set - - let read_all s = - fold s ~init:Set.empty ~f:(fun i set -> Lwt.return (Set.add i set)) - - let store_all s new_set = - read_all s >>= fun old_set -> - Set.iter_p (remove s) (Set.diff old_set new_set) >>= fun () -> - Set.iter_p (store s) (Set.diff new_set old_set) - end - - module Make_map (N : NAME) (V : VALUE) = struct - type t = S.t - - type key = I.t - - type value = V.t - - let known s i = Store.known (s, i) N.name - - let read s i = - Store.read (s, i) N.name >>=? 
fun b -> Lwt.return (V.of_bytes b) - - let read_opt s i = - read s i >>= function - | Error _ -> Lwt.return_none - | Ok v -> Lwt.return_some v - - let store s i v = Store.store (s, i) N.name (V.to_bytes v) - - let remove s i = Store.remove (s, i) N.name - - let remove_all s = fold_indexes s ~init:() ~f:(fun i () -> remove s i) - - let fold s ~init ~f = - fold_indexes s ~init ~f:(fun i acc -> - read_opt s i >>= function - | None -> Lwt.return acc - | Some v -> f i v acc) - - let bindings s = - fold s ~init:[] ~f:(fun p v acc -> Lwt.return ((p, v) :: acc)) - - let iter s ~f = fold s ~init:() ~f:(fun p v () -> f p v) - - let fold_keys s ~init ~f = - fold_indexes s ~init ~f:(fun i acc -> - known s i >>= function false -> Lwt.return acc | true -> f i acc) - - let keys s = fold_keys s ~init:[] ~f:(fun p acc -> Lwt.return (p :: acc)) - - let iter_keys s ~f = fold_keys s ~init:() ~f:(fun p () -> f p) - end - - module Make_buffered_map - (N : NAME) - (V : VALUE) - (Map : Map.S with type key = I.t) = - struct - include Make_map (N) (V) - module Map = Map - - let read_all s = - fold s ~init:Map.empty ~f:(fun i v set -> Lwt.return (Map.add i v set)) - - let store_all s map = remove_all s >>= fun () -> Map.iter_p (store s) map - end -end - -module Make_set (S : STORE) (I : INDEX) = struct - type t = S.t - - type elt = I.t - - let inited = Bytes.of_string "inited" - - let known s i = S.known s (I.to_path i []) - - let store s i = S.store s (I.to_path i []) inited - - let remove s i = S.remove s (I.to_path i []) - - let remove_all s = S.remove_dir s [] - - let fold s ~init ~f = - let rec dig i path acc = - if i <= 1 then - S.fold s path ~init:acc ~f:(fun k acc -> - match k with - | `Dir _ -> Lwt.return acc - | `Key file -> ( - match I.of_path file with - | None -> assert false - | Some p -> f p acc)) - else - S.fold s path ~init:acc ~f:(fun k acc -> - match k with - | `Dir k -> dig (i - 1) k acc - | `Key _ -> Lwt.return acc) - in - dig I.path_length [] init - - let elements s = fold s ~init:[] ~f:(fun p acc -> Lwt.return (p :: acc)) - - let iter s ~f = fold s ~init:() ~f:(fun p () -> f p) -end - -module Make_buffered_set - (S : STORE) - (I : INDEX) - (Set : Set.S with type elt = I.t) = -struct - include Make_set (S) (I) - module Set = Set - - let read_all s = - fold s ~init:Set.empty ~f:(fun i set -> Lwt.return (Set.add i set)) - - let store_all s new_set = - read_all s >>= fun old_set -> - Set.iter_p (remove s) (Set.diff old_set new_set) >>= fun () -> - Set.iter_p (store s) (Set.diff new_set old_set) -end - -module Make_map (S : STORE) (I : INDEX) (V : VALUE) = struct - type t = S.t - - type key = I.t - - type value = V.t - - let known s i = S.known s (I.to_path i []) - - let read s i = - S.read s (I.to_path i []) >>=? 
fun b -> Lwt.return (V.of_bytes b) - - let read_opt s i = - read s i >>= function - | Error _ -> Lwt.return_none - | Ok v -> Lwt.return_some v - - let store s i v = S.store s (I.to_path i []) (V.to_bytes v) - - let remove s i = S.remove s (I.to_path i []) - - let remove_all s = S.remove_dir s [] - - let fold s ~init ~f = - let rec dig i path acc = - if i <= 1 then - S.fold s path ~init:acc ~f:(fun k acc -> - match k with - | `Dir _ -> Lwt.return acc - | `Key file -> ( - S.read_opt s file >>= function - | None -> Lwt.return acc - | Some b -> ( - match V.of_bytes b with - | Error _ -> - (* Silently ignore unparsable data *) - Lwt.return acc - | Ok v -> ( - match I.of_path file with - | None -> assert false - | Some path -> f path v acc)))) - else - S.fold s path ~init:acc ~f:(fun k acc -> - match k with - | `Dir k -> dig (i - 1) k acc - | `Key _ -> Lwt.return acc) - in - dig I.path_length [] init - - let bindings s = - fold s ~init:[] ~f:(fun p v acc -> Lwt.return ((p, v) :: acc)) - - let iter s ~f = fold s ~init:() ~f:(fun p v () -> f p v) - - let fold_keys s ~init ~f = - S.fold s [] ~init ~f:(fun p acc -> - match p with - | `Dir _ -> Lwt.return acc - | `Key p -> ( - match I.of_path p with - | None -> assert false - | Some path -> f path acc)) - - let keys s = fold_keys s ~init:[] ~f:(fun p acc -> Lwt.return (p :: acc)) - - let iter_keys s ~f = fold_keys s ~init:() ~f:(fun p () -> f p) -end - -module Make_buffered_map - (S : STORE) - (I : INDEX) - (V : VALUE) - (Map : Map.S with type key = I.t) = -struct - include Make_map (S) (I) (V) - module Map = Map - - let read_all s = - fold s ~init:Map.empty ~f:(fun i v set -> Lwt.return (Map.add i v set)) - - let store_all s map = remove_all s >>= fun () -> Map.iter_p (store s) map -end - -module Integer_index = struct - type t = int - - let path_length = 1 - - let to_path x l = string_of_int x :: l - - let of_path = function [x] -> int_of_string_opt x | _ -> None -end diff --git a/src/lib_store/legacy_store/store_helpers.mli b/src/lib_store/legacy_store/store_helpers.mli deleted file mode 100644 index db07d6951f3b..000000000000 --- a/src/lib_store/legacy_store/store_helpers.mli +++ /dev/null @@ -1,63 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. 
*) -(* *) -(*****************************************************************************) - -open Store_sigs - -module Make_value (V : ENCODED_VALUE) : VALUE with type t = V.t - -module Raw_value : VALUE with type t = Bytes.t - -module Make_single_store (S : STORE) (N : NAME) (V : VALUE) : - SINGLE_STORE with type t = S.t and type value = V.t - -module Make_substore (S : STORE) (N : NAME) : STORE with type t = S.t - -module Make_set (S : STORE) (I : INDEX) : - SET_STORE with type t = S.t and type elt = I.t - -module Make_buffered_set - (S : STORE) - (I : INDEX) - (Set : Set.S with type elt = I.t) : - BUFFERED_SET_STORE with type t = S.t and type elt = I.t and module Set = Set - -module Make_map (S : STORE) (I : INDEX) (V : VALUE) : - MAP_STORE with type t = S.t and type key = I.t and type value = V.t - -module Make_buffered_map - (S : STORE) - (I : INDEX) - (V : VALUE) - (Map : Map.S with type key = I.t) : - BUFFERED_MAP_STORE - with type t = S.t - and type key = I.t - and type value = V.t - and module Map = Map - -module Make_indexed_substore (S : STORE) (I : INDEX) : - INDEXED_STORE with type t = S.t and type key = I.t - -module Integer_index : INDEX with type t = int diff --git a/src/lib_store/legacy_store/store_sigs.ml b/src/lib_store/legacy_store/store_sigs.ml deleted file mode 100644 index 853697250387..000000000000 --- a/src/lib_store/legacy_store/store_sigs.ml +++ /dev/null @@ -1,204 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. 
*) -(* *) -(*****************************************************************************) - -module type NAME = sig - val name : string list -end - -module type VALUE = sig - type t - - val of_bytes : Bytes.t -> t tzresult - - val to_bytes : t -> Bytes.t -end - -module type ENCODED_VALUE = sig - type t - - val encoding : t Data_encoding.t -end - -module type INDEX = sig - type t - - val path_length : int - - val to_path : t -> string list -> string list - - val of_path : string list -> t option -end - -module type SINGLE_STORE = sig - type t - - type value - - val known : t -> bool Lwt.t - - val read : t -> value tzresult Lwt.t - - val read_opt : t -> value option Lwt.t - - val store : t -> value -> unit Lwt.t - - val remove : t -> unit Lwt.t -end - -module type STORE = sig - type t - - type key = string list - - type value = Bytes.t - - val known : t -> key -> bool Lwt.t - - val read : t -> key -> value tzresult Lwt.t - - val read_opt : t -> key -> value option Lwt.t - - val store : t -> key -> value -> unit Lwt.t - - val remove : t -> key -> unit Lwt.t - - val known_dir : t -> key -> bool Lwt.t - - val remove_dir : t -> key -> unit Lwt.t - - type key_or_dir = [`Key of key | `Dir of key] - - val fold : t -> key -> init:'a -> f:(key_or_dir -> 'a -> 'a Lwt.t) -> 'a Lwt.t - - val keys : t -> key -> key list Lwt.t - - val fold_keys : t -> key -> init:'a -> f:(key -> 'a -> 'a Lwt.t) -> 'a Lwt.t -end - -module type SET_STORE = sig - type t - - type elt - - val known : t -> elt -> bool Lwt.t - - val store : t -> elt -> unit Lwt.t - - val remove : t -> elt -> unit Lwt.t - - val elements : t -> elt list Lwt.t - - val remove_all : t -> unit Lwt.t - - val iter : t -> f:(elt -> unit Lwt.t) -> unit Lwt.t - - val fold : t -> init:'a -> f:(elt -> 'a -> 'a Lwt.t) -> 'a Lwt.t -end - -module type BUFFERED_SET_STORE = sig - include SET_STORE - - module Set : Set.S with type elt = elt - - val read_all : t -> Set.t Lwt.t - - val store_all : t -> Set.t -> unit Lwt.t -end - -module type MAP_STORE = sig - type t - - type key - - type value - - val known : t -> key -> bool Lwt.t - - val read : t -> key -> value tzresult Lwt.t - - val read_opt : t -> key -> value option Lwt.t - - val store : t -> key -> value -> unit Lwt.t - - val remove : t -> key -> unit Lwt.t - - val keys : t -> key list Lwt.t - - val bindings : t -> (key * value) list Lwt.t - - val remove_all : t -> unit Lwt.t - - val iter : t -> f:(key -> value -> unit Lwt.t) -> unit Lwt.t - - val iter_keys : t -> f:(key -> unit Lwt.t) -> unit Lwt.t - - val fold : t -> init:'a -> f:(key -> value -> 'a -> 'a Lwt.t) -> 'a Lwt.t - - val fold_keys : t -> init:'a -> f:(key -> 'a -> 'a Lwt.t) -> 'a Lwt.t -end - -module type BUFFERED_MAP_STORE = sig - include MAP_STORE - - module Map : Map.S with type key = key - - val read_all : t -> value Map.t Lwt.t - - val store_all : t -> value Map.t -> unit Lwt.t -end - -module type INDEXED_STORE = sig - type t - - type key - - module Store : STORE with type t = t * key - - val remove_all : t -> key -> unit Lwt.t - - val fold_indexes : t -> init:'a -> f:(key -> 'a -> 'a Lwt.t) -> 'a Lwt.t - - val indexes : t -> key list Lwt.t - - val resolve_index : t -> string list -> key list Lwt.t - - module Make_set (N : NAME) : SET_STORE with type t = t and type elt = key - - module Make_buffered_set (N : NAME) (Set : Set.S with type elt = key) : - BUFFERED_SET_STORE with type t = t and type elt = key and module Set = Set - - module Make_map (N : NAME) (V : VALUE) : - MAP_STORE with type t = t and type key = key and type value = V.t - - 
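(* A hypothetical instantiation, for orientation only — [Raw],
   [Block_index] and [Block_value] stand for arbitrary modules satisfying
   [STORE], [INDEX] and [ENCODED_VALUE]:
   {[
     module Blocks = Store_helpers.Make_indexed_substore (Raw) (Block_index)

     module Contents =
       Blocks.Make_map
         (struct
           let name = ["contents"]
         end)
         (Store_helpers.Make_value (Block_value))
   ]}
   [Contents] then exposes the [MAP_STORE] operations, storing one value
   under the ["contents"] name within each index's directory. *)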
module Make_buffered_map - (N : NAME) - (V : VALUE) - (Map : Map.S with type key = key) : - BUFFERED_MAP_STORE - with type t = t - and type key = key - and type value = V.t - and module Map = Map -end diff --git a/src/lib_store/legacy_store/test/.ocamlformat b/src/lib_store/legacy_store/test/.ocamlformat deleted file mode 100644 index 5e1158919e85..000000000000 --- a/src/lib_store/legacy_store/test/.ocamlformat +++ /dev/null @@ -1,17 +0,0 @@ -version=0.18.0 -wrap-fun-args=false -let-binding-spacing=compact -field-space=loose -break-separators=after -space-around-arrays=false -space-around-lists=false -space-around-records=false -space-around-variants=false -dock-collection-brackets=true -space-around-records=false -sequence-style=separator -doc-comments=before -margin=80 -module-item-spacing=sparse -parens-tuple=always -parens-tuple-patterns=always diff --git a/src/lib_store/legacy_store/test/assert.ml b/src/lib_store/legacy_store/test/assert.ml deleted file mode 100644 index fdef808d32c8..000000000000 --- a/src/lib_store/legacy_store/test/assert.ml +++ /dev/null @@ -1,85 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. 
*) -(* *) -(*****************************************************************************) - -let fail expected given msg = - Format.kasprintf - Stdlib.failwith - "@[%s@ expected: %s@ got: %s@]" - msg - expected - given - -let fail_msg fmt = Format.kasprintf (fail "" "") fmt - -let default_printer _ = "" - -let equal ?(eq = ( = )) ?(prn = default_printer) ?(msg = "") x y = - if not (eq x y) then fail (prn x) (prn y) msg - -let equal_string ?msg s1 s2 = equal ?msg ~prn:(fun s -> s) s1 s2 - -let equal_string_option ?msg o1 o2 = - let prn = function None -> "None" | Some s -> s in - equal ?msg ~prn o1 o2 - -let is_none ?(msg = "") x = if x <> None then fail "None" "Some _" msg - -let make_equal_list eq prn ?(msg = "") x y = - let rec iter i x y = - match (x, y) with - | (hd_x :: tl_x, hd_y :: tl_y) -> - if eq hd_x hd_y then iter (succ i) tl_x tl_y - else - let fm = Printf.sprintf "%s (at index %d)" msg i in - fail (prn hd_x) (prn hd_y) fm - | (_ :: _, []) | ([], _ :: _) -> - let fm = Printf.sprintf "%s (lists of different sizes)" msg in - fail_msg "%s" fm - | ([], []) -> () - in - iter 0 x y - -let equal_string_list ?msg l1 l2 = make_equal_list ?msg ( = ) (fun x -> x) l1 l2 - -let equal_string_list_list ?msg l1 l2 = - let pr_persist l = - let res = String.concat ";" (List.map (fun s -> Printf.sprintf "%S" s) l) in - Printf.sprintf "[%s]" res - in - make_equal_list ?msg ( = ) pr_persist l1 l2 - -let equal_key_dir_list ?msg l1 l2 = - make_equal_list - ?msg - ( = ) - (function - | `Key k -> "Key " ^ String.concat "/" k - | `Dir k -> "Dir " ^ String.concat "/" k) - l1 - l2 - -let equal_context_hash_list ?msg l1 l2 = - let pr_persist hash = Printf.sprintf "[%s]" @@ Context_hash.to_string hash in - make_equal_list ?msg Context_hash.( = ) pr_persist l1 l2 diff --git a/src/lib_store/legacy_store/test/dune b/src/lib_store/legacy_store/test/dune deleted file mode 100644 index ee5bb4b7b9e8..000000000000 --- a/src/lib_store/legacy_store/test/dune +++ /dev/null @@ -1,19 +0,0 @@ -(executable - (name test) - (libraries - tezos-base - tezos-legacy-store - tezos-stdlib-unix - alcotest-lwt) - (flags - (:standard - -open Tezos_base__TzPervasives - -open Tezos_legacy_store - -open Tezos_stdlib_unix))) - -(alias (name buildtest) (deps test.exe)) - -(rule - (alias runtest) - (package tezos-legacy-store) - (action (chdir %{workspace_root} (run %{exe:test.exe})))) diff --git a/src/lib_store/legacy_store/test/test.ml b/src/lib_store/legacy_store/test/test.ml deleted file mode 100644 index b25d3bf0442a..000000000000 --- a/src/lib_store/legacy_store/test/test.ml +++ /dev/null @@ -1,31 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* Copyright (c) 2020 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. 
*) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -let () = - Lwt_main.run - (Alcotest_lwt.run - "tezos-legacy-store" - [("raw_store", Test_raw_store.tests)]) diff --git a/src/lib_store/legacy_store/test/test_raw_store.ml b/src/lib_store/legacy_store/test/test_raw_store.ml deleted file mode 100644 index 16c253f64f5a..000000000000 --- a/src/lib_store/legacy_store/test/test_raw_store.ml +++ /dev/null @@ -1,94 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -(** Testing - ------- - Component: Raw Store - Invocation: dune build @src/lib_storage/runtest - Subject: On (key, value) storage. -*) - -open Raw_store - -let ( >>= ) = Lwt.bind - -let ( >|= ) = Lwt.( >|= ) - -open Filename.Infix - -let wrap_store_init f _ () = - Lwt_utils_unix.with_tempdir "tezos_test_" (fun base_dir -> - let root = base_dir // "store" in - init ~mapsize:4_096_000L root >>= function - | Error _ -> Assert.fail_msg "wrap_store_init" - | Ok store -> f store) - -let entries s k = - fold s k ~init:[] ~f:(fun e acc -> Lwt.return (e :: acc)) >|= List.rev - -(** Stores various (key, value) associations from type [string list] to - [bytes]. Use function [entries] to filter out keys by prefix. Then, - assert that such lists contain the right keys with [`Key] or [`Dir] - for key prefixes. -*) -let test_fold st = - store st ["a"; "b"] (Bytes.of_string "Novembre") >>= fun _ -> - store st ["a"; "c"] (Bytes.of_string "Juin") >>= fun _ -> - store st ["a"; "d"; "e"] (Bytes.of_string "Septembre") >>= fun _ -> - store st ["f"] (Bytes.of_string "Avril") >>= fun _ -> - (* The code of '.' is just below the one of '/' ! 
*) - store st ["g"; ".12"; "a"] (Bytes.of_string "Mai") >>= fun _ -> - store st ["g"; ".12"; "b"] (Bytes.of_string "Février") >>= fun _ -> - store st ["g"; "123"; "456"] (Bytes.of_string "Mars") >>= fun _ -> - store st ["g"; "1230"] (Bytes.of_string "Janvier") >>= fun _ -> - entries st [] >>= fun l -> - Assert.equal_key_dir_list ~msg:__LOC__ [`Dir ["a"]; `Key ["f"]; `Dir ["g"]] l ; - entries st ["0"] >>= fun l -> - Assert.equal_key_dir_list ~msg:__LOC__ [] l ; - entries st ["0"; "1"] >>= fun l -> - Assert.equal_key_dir_list ~msg:__LOC__ [] l ; - entries st ["a"] >>= fun l -> - Assert.equal_key_dir_list - ~msg:__LOC__ - [`Key ["a"; "b"]; `Key ["a"; "c"]; `Dir ["a"; "d"]] - l ; - entries st ["a"; "d"] >>= fun l -> - Assert.equal_key_dir_list ~msg:__LOC__ [`Key ["a"; "d"; "e"]] l ; - entries st ["f"] >>= fun l -> - Assert.equal_key_dir_list ~msg:__LOC__ [] l ; - entries st ["f"; "z"] >>= fun l -> - Assert.equal_key_dir_list ~msg:__LOC__ [] l ; - entries st ["g"] >>= fun l -> - Assert.equal_key_dir_list - ~msg:__LOC__ - [`Dir ["g"; ".12"]; `Dir ["g"; "123"]; `Key ["g"; "1230"]] - l ; - entries st ["g"; "123"] >>= fun l -> - Assert.equal_key_dir_list ~msg:__LOC__ [`Key ["g"; "123"; "456"]] l ; - entries st ["z"] >>= fun l -> - Assert.equal_key_dir_list ~msg:__LOC__ [] l ; - Lwt.return_unit - -let tests = [Alcotest_lwt.test_case "fold" `Quick (wrap_store_init test_fold)] diff --git a/src/lib_store/legacy_store/tezos-legacy-store.opam b/src/lib_store/legacy_store/tezos-legacy-store.opam deleted file mode 100644 index f588145e93f0..000000000000 --- a/src/lib_store/legacy_store/tezos-legacy-store.opam +++ /dev/null @@ -1,26 +0,0 @@ -opam-version: "2.0" -maintainer: "contact@tezos.com" -authors: ["Tezos devteam"] -homepage: "https://www.tezos.com/" -bug-reports: "https://gitlab.com/tezos/tezos/issues" -dev-repo: "git+https://gitlab.com/tezos/tezos.git" -license: "MIT" -depends: [ - "dune" { >= "2.9" } - "tezos-base" - "tezos-shell-services" - "tezos-stdlib-unix" - "tezos-protocol-updater" - "tezos-lmdb" - "tezos-validation" - "tezos-context" - "lwt-watcher" { = "0.1" } - "alcotest-lwt" { with-test & >= "1.5.0" } - "tezos-embedded-protocol-alpha" - "tezos-embedded-protocol-genesis" -] -build: [ - ["dune" "build" "-p" name "-j" jobs] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -synopsis: "Tezos: legacy low-level key-value store for `tezos-node`" diff --git a/src/lib_store/store.ml b/src/lib_store/store.ml index 6c62d84816d2..533b64ab30b0 100644 --- a/src/lib_store/store.ml +++ b/src/lib_store/store.ml @@ -2849,215 +2849,4 @@ module Unsafe = struct let chain_config = {history_mode; genesis; expiration = None} in Stored_data.write_file (Naming.chain_config_file chain_dir) chain_config >>=? 
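(* In [restore_from_legacy_snapshot] below, the caboose depends on the
   targeted history mode: the genesis block for [Archive] and [Full], the
   first block of the floating store for [Rolling]. That decision,
   isolated as a sketch with the same names as the function that follows:
   {[
     let select_caboose history_mode genesis_block floating_blocks_stream =
       match history_mode with
       | History_mode.Archive | Full _ -> return genesis_block
       | Rolling _ -> (
           Lwt_stream.peek floating_blocks_stream >>= function
           | Some caboose -> return caboose
           | None ->
               (* By construction, a valid snapshot ships at least one
                  floating block. *)
               assert false)
   ]} *)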
fun () -> return_unit - - let restore_from_legacy_snapshot ?(notify = fun () -> Lwt.return_unit) - store_dir ~context_index ~genesis ~genesis_context_hash - ~floating_blocks_stream ~new_head_with_metadata ~partial_protocol_levels - ~history_mode = - let chain_id = Chain_id.of_block_hash genesis.Genesis.block in - let chain_dir = Naming.chain_dir store_dir chain_id in - let genesis_block = - Block_repr.create_genesis_block ~genesis genesis_context_hash - in - let new_head_descr = - ( Block_repr.hash new_head_with_metadata, - Block_repr.level new_head_with_metadata ) - in - (* Write consistent stored data *) - (* We will write protocol levels when we have access to blocks to - retrieve necessary infos *) - Stored_data.write_file - (Naming.current_head_file chain_dir) - (Block.descriptor new_head_with_metadata) - >>=? fun () -> - Stored_data.write_file (Naming.alternate_heads_file chain_dir) [] - >>=? fun () -> - (* Checkpoint is the new head *) - Stored_data.write_file (Naming.checkpoint_file chain_dir) new_head_descr - >>=? fun () -> - (* Cementing highwatermark is the new head *) - Stored_data.write_file (Naming.cementing_highwatermark_file chain_dir) None - >>=? fun () -> - Stored_data.write_file (Naming.target_file chain_dir) None >>=? fun () -> - (* Savepoint is the head *) - Stored_data.write_file (Naming.savepoint_file chain_dir) new_head_descr - >>=? fun () -> - (* Depending on the history mode, set the caboose properly *) - (match history_mode with - | History_mode.Archive | Full _ -> return genesis_block - | Rolling _ -> ( - Lwt_stream.peek floating_blocks_stream >>= function - | None -> - (* This should not happen. It is ensured, by construction - when exporting a (valid) snapshot. *) - assert false - | Some caboose -> return caboose)) - >>=? fun caboose -> - let caboose_descr = Block.descriptor caboose in - Stored_data.write_file (Naming.caboose_file chain_dir) caboose_descr - >>=? fun () -> - Stored_data.write_file - (Naming.invalid_blocks_file chain_dir) - Block_hash.Map.empty - >>=? fun () -> - Stored_data.write_file - (Naming.forked_chains_file chain_dir) - Chain_id.Map.empty - >>=? fun () -> - Stored_data.write_file (Naming.genesis_block_file chain_dir) genesis_block - >>=? fun () -> - (* Load the store (containing the cemented if relevant) *) - Block_store.load chain_dir ~genesis_block ~readonly:false - >>=? fun block_store -> - (* Store the floating *) - Lwt_stream.iter_s - (fun block -> - Block_store.store_block block_store block >>= fun _ -> notify ()) - floating_blocks_stream - >>= fun () -> - (* Store the head *) - Block_store.store_block block_store new_head_with_metadata >>=? fun () -> - notify () >>= fun () -> - (* We also need to store the genesis' protocol transition *) - Chain.get_commit_info context_index (Block.header genesis_block) - >>=? fun genesis_commit_info -> - let initial_protocol_levels = - Protocol_levels.( - add - (Block.proto_level genesis_block) - { - block = Block.descriptor genesis_block; - protocol = genesis.protocol; - commit_info = Some genesis_commit_info; - } - empty) - in - let new_head_context_hash = Block.context_hash new_head_with_metadata in - (Context.checkout context_index new_head_context_hash >>= function - | Some ctxt -> return ctxt - | None -> - fail - (Cannot_checkout_context - (Block_repr.hash new_head_with_metadata, new_head_context_hash))) - >>=? 
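(* Each legacy transition level is resolved to a block by reading at a
   distance from the new head, as the fold below does — a condensed sketch
   reusing the surrounding names:
   {[
     let read_transition_block block_store new_head transition_level =
       let distance =
         Int32.(to_int (sub (Block_repr.level new_head) transition_level))
       in
       Block_store.read_block
         block_store
         ~read_metadata:false
         (Block (Block_repr.hash new_head, distance))
   ]} *)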
fun context -> - Context.get_protocol context >>= fun head_protocol -> - (* Compute protocol levels and check their correctness *) - List.fold_left_es - (fun proto_levels (transition_level, protocol_hash, commit_info_opt) -> - let distance = - Int32.( - to_int - (sub (Block_repr.level new_head_with_metadata) transition_level)) - in - Block_store.read_block - block_store - ~read_metadata:false - (Block (Block_repr.hash new_head_with_metadata, distance)) - >>=? fun block_opt -> - match (block_opt, commit_info_opt) with - | (None, _) -> ( - match history_mode with - | Rolling _ -> - (* Corner-case for when the head protocol's transition - block has been deleted. *) - let block = - (* Important: we cannot retrieve the protocol - associated to an arbitrary block with a legacy - snapshot, we only know the head's one as it's - written in the context. Therefore, the transition - block is overwritten with either the caboose if - both blocks have the same proto_level or the - current_head otherwise. In the former case, block's - protocol data won't be deserialisable. *) - if - Compare.Int.( - Block.proto_level caboose - = Block.proto_level new_head_with_metadata) - then caboose_descr - else Block.descriptor new_head_with_metadata - in - if Protocol_hash.equal protocol_hash head_protocol then - return - Protocol_levels.( - add - (Block.proto_level new_head_with_metadata) - {block; protocol = protocol_hash; commit_info = None} - proto_levels) - else return proto_levels - | _ -> - fail_unless - Compare.Int32.(transition_level = Block.level genesis_block) - (Missing_activation_block_legacy - (transition_level, protocol_hash, history_mode)) - >>=? fun () -> - (* genesis commit info was already added *) - return proto_levels) - | (Some block, None) -> - return - Protocol_levels.( - add - (Block.proto_level block) - { - block = Block.descriptor block; - protocol = protocol_hash; - commit_info = commit_info_opt; - } - proto_levels) - | (Some block, Some commit_info) -> - let open Protocol_levels in - Context.check_protocol_commit_consistency - context_index - ~expected_context_hash:(Block.context_hash block) - ~given_protocol_hash:protocol_hash - ~author:commit_info.author - ~message:commit_info.message - ~timestamp:(Block.timestamp block) - ~test_chain_status:commit_info.test_chain_status - ~predecessor_block_metadata_hash: - commit_info.predecessor_block_metadata_hash - ~predecessor_ops_metadata_hash: - commit_info.predecessor_ops_metadata_hash - ~data_merkle_root:commit_info.data_merkle_root - ~parents_contexts:commit_info.parents_contexts - >>= fun is_consistent -> - if - is_consistent || Compare.Int32.(equal (Block_repr.level block) 0l) - then - return - Protocol_levels.( - add - (Block_repr.proto_level block) - { - block = Block.descriptor block; - protocol = protocol_hash; - commit_info = commit_info_opt; - } - proto_levels) - else - fail - (Inconsistent_protocol_commit_info - (Block.hash block, protocol_hash))) - initial_protocol_levels - partial_protocol_levels - >>=? fun protocol_levels -> - Stored_data.write_file - (Naming.protocol_levels_file chain_dir) - protocol_levels - >>=? fun () -> - Block_store.close block_store >>= fun () -> - let chain_config = {history_mode; genesis; expiration = None} in - Stored_data.write_file (Naming.chain_config_file chain_dir) chain_config - >>=? 
fun () -> return_unit - - let restore_from_legacy_upgrade store_dir ~genesis ~invalid_blocks - ~forked_chains = - let chain_id = Chain_id.of_block_hash genesis.Genesis.block in - let chain_dir = Naming.chain_dir store_dir chain_id in - (* We don't import branches, only a linear history, thus we don't - write any alternate heads *) - Stored_data.write_file (Naming.alternate_heads_file chain_dir) [] - >>=? fun () -> - Stored_data.write_file (Naming.invalid_blocks_file chain_dir) invalid_blocks - >>=? fun () -> - Stored_data.write_file (Naming.forked_chains_file chain_dir) forked_chains end diff --git a/src/lib_store/store.mli b/src/lib_store/store.mli index abb8c475d28e..4d80af25b548 100644 --- a/src/lib_store/store.mli +++ b/src/lib_store/store.mli @@ -1073,27 +1073,4 @@ module Unsafe : sig protocol_levels:Protocol_levels.activation_block Protocol_levels.t -> history_mode:History_mode.t -> unit tzresult Lwt.t - - (** [restore_from_snapshot_legacy ...] same as - [restore_from_snapshot] but slightly differs due to some - information missing from legacy snapshots. *) - val restore_from_legacy_snapshot : - ?notify:(unit -> unit Lwt.t) -> - [`Store_dir] Naming.directory -> - context_index:Context.index -> - genesis:Genesis.t -> - genesis_context_hash:Context_hash.t -> - floating_blocks_stream:Block_repr.block Lwt_stream.t -> - new_head_with_metadata:Block_repr.block -> - partial_protocol_levels: - (int32 * Protocol_hash.t * Protocol_levels.commit_info option) list -> - history_mode:History_mode.t -> - unit tzresult Lwt.t - - val restore_from_legacy_upgrade : - [`Store_dir] Naming.directory -> - genesis:Genesis.t -> - invalid_blocks:invalid_block Block_hash.Map.t -> - forked_chains:Block_hash.t Chain_id.Map.t -> - unit tzresult Lwt.t end diff --git a/src/lib_store/test/dune b/src/lib_store/test/dune index 61f32d257857..c7ec69948cc8 100644 --- a/src/lib_store/test/dune +++ b/src/lib_store/test/dune @@ -1,9 +1,8 @@ (executables - (names test test_legacy legacy_store_maker) + (names test) (libraries tezos-base tezos-store - tezos-legacy-store tezos-stdlib-unix tezos-embedded-protocol-demo-noops tezos-embedded-protocol-genesis @@ -16,36 +15,12 @@ (:standard -open Tezos_base__TzPervasives -open Tezos_store - -open Tezos_legacy_store -open Tezos_shell_services -open Tezos_stdlib_unix -open Tezos_validation -open Tezos_protocol_alpha_parameters -open Tezos_protocol_plugin_alpha))) -(rule - (alias buildtest) - (deps ../legacy_store/legacy_store_builder.exe test.exe test_legacy.exe) - (action (progn))) - (rule (alias runtest_store) - (action (setenv SLOW_TEST false (run %{exe:test.exe})))) - -(rule - (alias runtest_store_legacy) - (deps (:legacy_builder ../legacy_store/legacy_store_builder.exe)) - (action (run %{exe:test_legacy.exe} --builder-path %{legacy_builder}))) - -(rule - (alias runtest_store_legacy_slow) - (deps (:legacy_builder ../legacy_store/legacy_store_builder.exe)) - (action - (setenv SLOW_TEST true - (run %{exe:test_legacy.exe} --builder-path %{legacy_builder})))) - -(rule - (alias runtest) - (package tezos-store) - (deps (alias runtest_store) (alias runtest_store_legacy)) - (action (progn))) + (action (setenv SLOW_TEST false (run %{exe:test.exe})))) \ No newline at end of file diff --git a/src/lib_store/test/legacy_store_maker.ml b/src/lib_store/test/legacy_store_maker.ml deleted file mode 100644 index 35712fa136de..000000000000 --- a/src/lib_store/test/legacy_store_maker.ml +++ /dev/null @@ -1,112 +0,0 @@ -(*****************************************************************************) -(* 
*) -(* Open Source License *) -(* Copyright (c) 2020 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -(** This module is meant to be used by the Python tests to create legacy - stores that will be used to test legacy features. *) - -open Legacy_utils -open Filename.Infix - -let available_snapshots = function - | History_mode.Legacy.Archive -> [History_mode.Legacy.Full; Rolling] - | Full -> [Full; Rolling] - | Rolling -> [Rolling] - -let run ~base_dir ~legacy_store_builder_exe ~with_snapshots nb_blocks = - (* Build a new store in archive mode *) - build_new_store - nb_blocks - ~base_dir - ~patch_context - ~history_mode:History_mode.Archive - >>=? fun (store, genesis, genesis_block, blocks) -> - (* From the baked blocks, build all available history modes *) - let history_modes = History_mode.Legacy.[Archive; Full; Rolling] in - List.iter_es - (fun legacy_history_mode -> - let legacy_data_dir = - base_dir - // Format.asprintf - "%a_store_to_upgrade" - History_mode.Legacy.pp - legacy_history_mode - in - Format.printf "[Legacy store maker] Building %s@.@." legacy_data_dir ; - let legacy_store_dir = legacy_data_dir // "store" in - let legacy_context_dir = legacy_data_dir // "context" in - build_old_store - ~genesis - ~genesis_block - ~legacy_history_mode - ~store - ~legacy_data_dir - ~legacy_store_builder_exe - blocks - >>=? fun _ -> - (if with_snapshots then - List.iter_es - (fun snapshot_mode -> - let snapshot_file = - base_dir - // Format.asprintf - "snapshot_from_%a_storage.%a" - History_mode.Legacy.pp - legacy_history_mode - History_mode.Legacy.pp - snapshot_mode - in - let head_hash = - Store.Block.hash - List.(last_opt blocks |> WithExceptions.Option.get ~loc:__LOC__) - in - Format.printf - "[Legacy store maker] Exporting snapshot file: %s@.@." - snapshot_file ; - Legacy_snapshots.export - ~export_rolling:(snapshot_mode = History_mode.Legacy.Rolling) - ~store_root:legacy_store_dir - ~context_root:legacy_context_dir - ~genesis - snapshot_file - head_hash) - (available_snapshots legacy_history_mode) - else return_unit) - >>=? fun () -> return_unit) - history_modes - >>=? 
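(* [available_snapshots] above is the export matrix: [Archive] and [Full]
   stores can export both [Full] and [Rolling] snapshots, while a
   [Rolling] store can only export [Rolling] ones. The export call itself,
   reduced to its arguments — the same function as invoked above:
   {[
     let export_legacy_snapshot ~store_root ~context_root ~genesis
         ~snapshot_mode snapshot_file head_hash =
       Legacy_snapshots.export
         ~export_rolling:(snapshot_mode = History_mode.Legacy.Rolling)
         ~store_root
         ~context_root
         ~genesis
         snapshot_file
         head_hash
   ]} *)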
fun () -> return_unit - -let () = - let base_dir = Sys.argv.(1) in - let legacy_store_builder_exe = Sys.argv.(2) in - let nb_blocks = `Blocks (int_of_string Sys.argv.(3)) in - let with_snapshots = match Sys.argv.(4) with "true" -> true | _ -> false in - match - Lwt_main.run - (run ~base_dir ~legacy_store_builder_exe ~with_snapshots nb_blocks) - with - | Ok () -> () - | Error err -> - Format.eprintf "%a@." pp_print_trace err ; - exit 1 diff --git a/src/lib_store/test/legacy_utils.ml b/src/lib_store/test/legacy_utils.ml deleted file mode 100644 index 4eae32709c41..000000000000 --- a/src/lib_store/test/legacy_utils.ml +++ /dev/null @@ -1,458 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2020 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. 
*) -(* *) -(*****************************************************************************) - -let genesis = - {Test_utils.genesis with protocol = Tezos_protocol_genesis.Protocol.hash} - -let genesis_parameters = - ( "sandbox_parameter", - `O - [ - ( "genesis_pubkey", - `String "edpkuSLWfVU1Vq7Jg9FucPyKmma6otcMHac9zG4oU1KMHSTBpJuGQ2" ); - ] ) - -let default_genesis_accounts = - let open Tezos_protocol_alpha.Protocol in - let open Alpha_context in - let initial_accounts = - [ - ( "tz1KqTpEZ7Yob7QbPE4Hy4Wo8fHG8LhKxZSx", - "edpkuBknW28nW72KG6RoHtYW7p12T6GKc7nAbwYX5m8Wd9sDVC9yav", - "edsk3gUfUPyBSfrS9CCgmCiQsTCHGkviBDusMxDJstFtojtc1zcpsh" ); - ( "tz1gjaF81ZRRvdzjobyfVNsAeSC6PScjfQwN", - "edpktzNbDAUjUk697W7gYg2CRuBQjyPxbEg8dLccYYwKSKvkPvjtV9", - "edsk39qAm1fiMjgmPkw1EgQYkMzkJezLNewd7PLNHTkr6w9XA2zdfo" ); - ( "tz1faswCTDciRzE4oJ9jn2Vm2dvjeyA9fUzU", - "edpkuTXkJDGcFd5nh6VvMz8phXxU3Bi7h6hqgywNFi1vZTfQNnS1RV", - "edsk4ArLQgBTLWG5FJmnGnT689VKoqhXwmDPBuGx3z4cvwU9MmrPZZ" ); - ( "tz1b7tUupMgCNw2cCLpKTkSD1NZzB5TkP2sv", - "edpkuFrRoDSEbJYgxRtLx2ps82UdaYc1WwfS9sE11yhauZt5DgCHbU", - "edsk2uqQB9AY4FvioK2YMdfmyMrer5R8mGFyuaLLFfSRo8EoyNdht3" ); - ( "tz1ddb9NMYHZi5UzPdzTZMYQQZoMub195zgv", - "edpkv8EUUH68jmo3f7Um5PezmfGrRF24gnfLpH3sVNwJnV5bVCxL2n", - "edsk4QLrcijEffxV31gGdN2HU7UpyJjA8drFoNcmnB28n89YjPNRFm" ); - ] - in - let default_amount = Tez.of_mutez_exn 4_000_000_000_000L in - let open Alpha_utils.Account in - let to_account (pkh, pk, sk) = - { - pkh = Signature.Public_key_hash.of_b58check_exn pkh; - pk = Signature.Public_key.of_b58check_exn pk; - sk = Signature.Secret_key.of_b58check_exn sk; - } - in - let accounts = List.map to_account initial_accounts in - List.iter add_account accounts ; - List.map (fun acc -> account_to_bootstrap (acc, default_amount)) accounts - -(* Inlined from Tezos_shell.Patch_context to avoid cyclic dependencies *) -let patch_context ctxt = - let key_json = Some genesis_parameters in - (match key_json with - | None -> Lwt.return ctxt - | Some (key, json) -> - Tezos_context.Context.add - ctxt - [key] - (Data_encoding.Binary.to_bytes_exn Data_encoding.json json)) - >>= fun ctxt -> - Tezos_protocol_updater.Registered_protocol.get_result genesis.protocol - >>=? fun proto -> - let module Proto = (val proto) in - let ctxt = Tezos_shell_context.Shell_context.wrap_disk_context ctxt in - Proto.init - ctxt - { - level = 0l; - proto_level = 0; - predecessor = genesis.block; - timestamp = genesis.time; - validation_passes = 0; - operations_hash = Operation_list_list_hash.empty; - fitness = []; - context = Context_hash.zero; - } - >>=? fun {context; _} -> - return (Tezos_shell_context.Shell_context.unwrap_disk_context context) - -let make_legacy_store ?(user_activated_upgrades = []) - ?(user_activated_protocol_overrides = []) ~legacy_store_builder_exe - ~history_mode ~genesis_block store genesis (blocks : Store.Block.t list) - ~output_dir = - let legacy_dir = output_dir in - let proc = - let exe = legacy_store_builder_exe in - if not (Sys.file_exists exe) then - Format.ksprintf Stdlib.invalid_arg "File %s does not exist" exe ; - Lwt_process.open_process_full (exe, [|Filename.basename exe; legacy_dir|]) - in - let proc_reader = - let rec loop () = - External_validation.recv proc#stdout Data_encoding.string >>= fun l -> - Format.printf "[Legacy store builder] %s@." l ; - loop () - in - Lwt.catch - (fun () -> loop ()) - (function - | End_of_file -> - Format.printf "[Legacy store builder] Terminating@.@." 
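(* The builder subprocess is driven over its stdin by a small framed
   protocol: first the legacy history mode and the validator parameters,
   then one [Validate] request per block, and finally [Terminate] —
   exactly the sequence performed in the code below. A condensed sketch,
   where [request_of_block] is a hypothetical helper building the
   [Validate] payload:
   {[
     let drive_builder proc ~history_mode ~params blocks =
       External_validation.send
         proc#stdin
         (Data_encoding.tup2
            History_mode.Legacy.encoding
            External_validation.parameters_encoding)
         (history_mode, params)
       >>= fun () ->
       Lwt_list.iter_s
         (fun block ->
           External_validation.send
             proc#stdin
             External_validation.request_encoding
             (request_of_block block))
         blocks
       >>= fun () ->
       External_validation.send
         proc#stdin
         External_validation.request_encoding
         Terminate
   ]} *)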
; - Lwt.return_unit - | exn -> Lwt.fail exn) - in - Lwt.async (fun () -> proc_reader) ; - (* Initialize the legacy state *) - External_validation.send - proc#stdin - (Data_encoding.tup2 - History_mode.Legacy.encoding - External_validation.parameters_encoding) - ( history_mode, - { - External_validation.context_root = Filename.concat legacy_dir "context"; - protocol_root = Filename.concat legacy_dir "protocol"; - sandbox_parameters = Some (snd genesis_parameters); - genesis; - user_activated_upgrades; - user_activated_protocol_overrides; - } ) - >>= fun () -> - (* Start sending blocks *) - let chain_store = Store.main_chain_store store in - let chain_id = Store.Chain.chain_id chain_store in - Lwt_list.fold_left_s - (fun pred_block block -> - let block_header = Store.Block.header block in - let operations = Store.Block.operations block in - let max_operations_ttl = - (WithExceptions.Option.get - ~loc:__LOC__ - (Store.Unsafe.repr_of_block pred_block).Block_repr.metadata) - .max_operations_ttl - in - let predecessor_ops_metadata_hash = - Store.Block.all_operations_metadata_hash pred_block - in - External_validation.send - proc#stdin - External_validation.request_encoding - (Validate - { - chain_id; - block_header; - predecessor_block_header = Store.Block.header pred_block; - predecessor_block_metadata_hash = - Store.Block.block_metadata_hash pred_block; - predecessor_ops_metadata_hash; - operations; - max_operations_ttl; - }) - >>= fun () -> Lwt.return block) - genesis_block - blocks - >>= fun _ -> - External_validation.send - proc#stdin - External_validation.request_encoding - Terminate - >>= fun () -> - Lwt.join [proc_reader] >>= fun () -> - proc#status >>= fun status -> - Assert.is_true ~msg:"legacy builder exited" (status = Unix.(WEXITED 0)) ; - Lwt.return_unit - -open Filename.Infix - -let fitness_from_int64 fitness = - (* definition taken from src/proto_alpha/lib_protocol/src/constants_repr.ml *) - let version_number = "\000" in - (* definitions taken from src/proto_alpha/lib_protocol/src/fitness_repr.ml *) - let int64_to_bytes i = - let b = Bytes.create 8 in - TzEndian.set_int64 b 0 i ; - b - in - [Bytes.of_string version_number; int64_to_bytes fitness] - -let make_block_header shell command = - let protocol_data = - Data_encoding.Binary.to_bytes_exn - Tezos_protocol_genesis.Protocol.Data.Command.encoding - command - in - ({shell; protocol_data} : Block_header.t) - -let make_activation_block genesis_block = - let open Tezos_protocol_genesis.Protocol.Data.Command in - let fitness = fitness_from_int64 1L in - let protocol_parameters = - Data_encoding.Binary.to_bytes_exn - Data_encoding.json - (Data_encoding.Json.construct - Tezos_protocol_alpha.Protocol.Alpha_context.Parameters.encoding - { - Alpha_utils.default_genesis_parameters with - bootstrap_accounts = default_genesis_accounts; - }) - in - let command = - Activate - { - protocol = Tezos_protocol_alpha.Protocol.hash; - fitness; - protocol_parameters; - } - in - let operations_hash = Operation_list_list_hash.compute [] in - let shell_header = - ({ - (Alpha_utils.Forge.make_shell - ~level:(Int32.succ (Store.Block.level genesis_block)) - ~predecessor:(Store.Block.hash genesis_block) - ~timestamp: - (Time.Protocol.add (Store.Block.timestamp genesis_block) 60L) - ~fitness - ~operations_hash - ~proto_level:1) - with - validation_passes = 0; - } - : Block_header.shell_header) - in - let block_header = make_block_header shell_header command in - ( block_header, - ({command; signature = Signature.zero} - : 
Tezos_protocol_genesis.Protocol.block_header_data) ) - -let init_store ~base_dir ~patch_context ~history_mode k = - let new_store_dir = base_dir // "new_store" in - let store_dir = new_store_dir // "store" in - let context_dir = new_store_dir // "context" in - Store.init - ~patch_context - ~history_mode - ~store_dir - ~context_dir - ~allow_testchains:false - genesis - >>=? fun store -> k (store_dir, context_dir) store - -let build_new_store nb_blocks ~base_dir ~patch_context ~history_mode = - let k (_store_dir, _context_dir) store = - let chain_store = Store.main_chain_store store in - let genesis = Store.Chain.genesis chain_store in - Store.Chain.genesis_block chain_store >>= fun genesis_block -> - let chain_id = Store.Chain.chain_id chain_store in - (* Make the activation block with "dummy" context_hash and protocol_data *) - let (activation_block, protocol_data) = - make_activation_block genesis_block - in - Store.Block.context chain_store genesis_block >>=? fun genesis_ctxt -> - (* Craft a block in order to get a right context_hash *) - Tezos_protocol_genesis.Protocol.begin_construction - ~chain_id - ~predecessor_context: - (Tezos_shell_context.Shell_context.wrap_disk_context genesis_ctxt) - ~predecessor_timestamp:(Store.Block.timestamp genesis_block) - ~predecessor_level:(Store.Block.level genesis_block) - ~predecessor_fitness:(Store.Block.fitness genesis_block) - ~predecessor:genesis.block - ~timestamp:(Time.Protocol.add (Store.Block.timestamp genesis_block) 60L) - ~protocol_data - () - >>= fun res -> - Lwt.return (Tezos_protocol_environment_genesis.Environment.wrap_error res) - >>=? fun vs -> - Tezos_protocol_genesis.Protocol.finalize_block vs >>= fun res -> - Lwt.return (Tezos_protocol_environment_genesis.Environment.wrap_error res) - >>=? fun ({context; _}, _meta) -> - Tezos_protocol_alpha.Protocol.Main.init context activation_block.shell - >>= fun res -> - Lwt.return (Tezos_protocol_environment_alpha.Environment.wrap_tzresult res) - >>=? fun vr -> - let ctxt_hash = - Tezos_context.Context.hash - ~time:activation_block.shell.timestamp - (Tezos_shell_context.Shell_context.unwrap_disk_context vr.context) - in - (* Use the right context hash in activation block *) - let shell = {activation_block.shell with context = ctxt_hash} in - let activation_block = {activation_block with shell} in - let block = - Tezos_protocol_genesis.Protocol.Data.Command.forge - shell - protocol_data.command - in - let sk = - Signature.Secret_key.of_b58check_exn - "edsk31vznjHSSpGExDMHYASz45VZqXN4DPxvsa4hAyY8dHM28cZzp6" - in - (* Sign the block with activator key *) - let signature = - Signature.sign ~watermark:Signature.(Block_header chain_id) sk block - in - (* Protocol_data is the signed block *) - let protocol_data = - Signature.concat - (Data_encoding.Binary.to_bytes_exn - Tezos_protocol_genesis.Protocol.Data.Command.encoding - protocol_data.command) - signature - in - (* Use the right protocol data *) - let activation_block = {activation_block with protocol_data} in - (* Regular apply + store *) - let apply_environment = - { - Block_validation.max_operations_ttl = 0; - chain_id; - predecessor_block_header = Store.Block.header genesis_block; - predecessor_context = genesis_ctxt; - predecessor_block_metadata_hash = None; - predecessor_ops_metadata_hash = None; - user_activated_upgrades = []; - user_activated_protocol_overrides = []; - } - in - Block_validation.apply apply_environment activation_block [] ~cache:`Lazy - >>=? 
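(* The signing step above, in isolation: the signature is computed over
   the forged block with the [Block_header] watermark, and the block's
   protocol data is the encoded command followed by that signature. A
   sketch using the same modules as the surrounding code:
   {[
     let sign_genesis_block ~chain_id ~sk shell command =
       let unsigned =
         Tezos_protocol_genesis.Protocol.Data.Command.forge shell command
       in
       let signature =
         Signature.sign
           ~watermark:Signature.(Block_header chain_id)
           sk
           unsigned
       in
       Signature.concat
         (Data_encoding.Binary.to_bytes_exn
            Tezos_protocol_genesis.Protocol.Data.Command.encoding
            command)
         signature
   ]} *)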
fun {result = bv; _} -> - (Store.Block.store_block - chain_store - ~block_header:activation_block - ~operations:[] - bv - >>=? function - | Some b -> return b - | None -> assert false) - >>=? fun block -> - Lwt.finalize - (fun () -> - match nb_blocks with - | `Cycle n -> Alpha_utils.bake_until_n_cycle_end chain_store n block - | `Blocks n -> Alpha_utils.bake_n chain_store n block) - (fun () -> - let bs = Store.Unsafe.get_block_store chain_store in - Block_store.await_merging bs >>= fun _ -> Lwt.return_unit) - >>=? fun (blocks, _last_head) -> - return (store, genesis, genesis_block, block :: blocks) - in - init_store ~base_dir ~patch_context ~history_mode k - -let build_old_store ~genesis ~genesis_block ~legacy_history_mode:history_mode - ~store ~legacy_data_dir ~legacy_store_builder_exe blocks = - make_legacy_store - ~legacy_store_builder_exe - ~genesis_block - ~history_mode - store - genesis - blocks - ~output_dir:legacy_data_dir - >>= fun () -> - Legacy_state.init - ~readonly:false - ~history_mode - ~store_root:(legacy_data_dir // "store") - ~context_root:(legacy_data_dir // "context") - genesis - -let store_builder ?(legacy_history_mode = History_mode.Legacy.Full) - ?(nb_blocks = `Cycle 8) ~base_dir ~legacy_store_builder_exe () = - let history_mode = History_mode.convert legacy_history_mode in - build_new_store nb_blocks ~base_dir ~patch_context ~history_mode - >>=? fun (store, genesis, genesis_block, blocks) -> - let legacy_data_dir = base_dir // "store_to_upgrade" in - build_old_store - ~genesis - ~genesis_block - ~legacy_history_mode - ~store - ~legacy_data_dir - ~legacy_store_builder_exe - blocks - >>=? fun (legacy_state, _, _, _) -> - return (store, (legacy_data_dir, legacy_state), blocks) - -type test = { - name : string; - speed : [`Quick | `Slow]; - legacy_history_mode : History_mode.Legacy.t; - nb_blocks : [`Cycle of int | `Blocks of int]; - test : - Store.t -> - string * Legacy_state.t -> - Store.Block.t list -> - unit tzresult Lwt.t; -} - -let wrap_test_legacy ?(keep_dir = false) test : string Alcotest_lwt.test_case = - let {name; speed; legacy_history_mode; nb_blocks; test} = test in - let prefix_dir = "tezos_indexed_store_test_" in - let with_dir f = - if not keep_dir then - Lwt_utils_unix.with_tempdir prefix_dir (fun base_dir -> - Lwt.catch - (fun () -> f base_dir) - (fun exn -> - Lwt_utils_unix.remove_dir base_dir >>= fun () -> fail_with_exn exn)) - else - let base_dir = Filename.temp_file prefix_dir "" in - Lwt_unix.unlink base_dir >>= fun () -> - Lwt_unix.mkdir base_dir 0o700 >>= fun () -> - Format.printf "@\nPersisting dir %s for test.@." base_dir ; - f base_dir - in - let test _ legacy_store_builder_exe = - with_dir (fun base_dir -> - store_builder - ~base_dir - ~legacy_history_mode - ~nb_blocks - ~legacy_store_builder_exe - () - >>=? fun (store, (legacy_store_dir, legacy_state), blocks) -> - Lwt.finalize - (fun () -> - protect - ~on_error:(fun err -> - Store.make_pp_store store >>= fun pp_store -> - Format.eprintf "@[DEBUG:@ %a@]@." pp_store () ; - Lwt.return (Error err)) - (fun () -> test store (legacy_store_dir, legacy_state) blocks)) - (fun () -> - Legacy_state.close legacy_state >>= fun () -> - Store.close_store store)) - in - Alcotest_lwt.test_case name speed (fun x exe -> - test x exe >>= function - | Ok () -> Lwt.return_unit - | Error errs -> - Format.printf - "@\nError while running test:@\n%a@." 
- Error_monad.pp_print_trace - errs ; - Lwt.fail Alcotest.Test_error) diff --git a/src/lib_store/test/test_legacy.ml b/src/lib_store/test/test_legacy.ml deleted file mode 100644 index 6a37d423273d..000000000000 --- a/src/lib_store/test/test_legacy.ml +++ /dev/null @@ -1,667 +0,0 @@ -(*****************************************************************************) -(* *) -(* Open Source License *) -(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. *) -(* Copyright (c) 2020 Nomadic Labs, *) -(* *) -(* Permission is hereby granted, free of charge, to any person obtaining a *) -(* copy of this software and associated documentation files (the "Software"),*) -(* to deal in the Software without restriction, including without limitation *) -(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) -(* and/or sell copies of the Software, and to permit persons to whom the *) -(* Software is furnished to do so, subject to the following conditions: *) -(* *) -(* The above copyright notice and this permission notice shall be included *) -(* in all copies or substantial portions of the Software. *) -(* *) -(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) -(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) -(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) -(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) -(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) -(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) -(* DEALINGS IN THE SOFTWARE. *) -(* *) -(*****************************************************************************) - -open Test_utils -open Legacy_utils - -let assert_presence new_chain_store previously_baked_blocks ?savepoint ?caboose - = function - | History_mode.Archive -> - assert_presence_in_store - ~with_metadata:true - new_chain_store - previously_baked_blocks - | Full _ -> - let expected_savepoint = - WithExceptions.Option.get ~loc:__LOC__ savepoint - in - let (pruned, complete) = - List.split_n (Int32.to_int expected_savepoint) previously_baked_blocks - in - assert_presence_in_store ~with_metadata:false new_chain_store pruned - >>=? fun () -> - assert_presence_in_store ~with_metadata:true new_chain_store complete - | Rolling _ -> - let expected_caboose = WithExceptions.Option.get ~loc:__LOC__ caboose in - let expected_savepoint = - WithExceptions.Option.get ~loc:__LOC__ savepoint - in - let (pruned, complete) = - let rolling_window = - List.filter - (fun b -> Store.Block.level b >= expected_caboose) - previously_baked_blocks - in - List.split_n (Int32.to_int expected_savepoint) rolling_window - in - assert_presence_in_store ~with_metadata:false new_chain_store pruned - >>=? fun () -> - assert_presence_in_store ~with_metadata:true new_chain_store complete - -let check_flags new_chain_store previously_baked_blocks history_mode = - let last = - List.last_opt previously_baked_blocks - |> WithExceptions.Option.get ~loc:__LOC__ - in - Assert.equal_history_mode - ~msg:"history mode consistency: " - history_mode - (Store.Chain.history_mode new_chain_store) ; - Store.Chain.checkpoint new_chain_store >>= fun checkpoint -> - Store.Block.get_block_metadata new_chain_store last >>=? 
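(* The invariants verified below, gathered in one place: the checkpoint
   must equal the head's last allowed fork level, while the expected
   savepoint and caboose depend on the history mode. A sketch of the
   expected pair, assuming [expected_checkpoint] and [max_op_ttl] as
   computed in this function:
   {[
     let expected_savepoint_and_caboose ~history_mode ~expected_checkpoint
         ~max_op_ttl =
       match history_mode with
       | History_mode.Archive -> (0l, 0l)
       | Full _ -> (expected_checkpoint, 0l)
       | Rolling _ ->
           ( expected_checkpoint,
             max 0l Int32.(sub expected_checkpoint (of_int max_op_ttl)) )
   ]} *)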
fun metadata -> - let expected_checkpoint = Store.Block.last_allowed_fork_level metadata in - Assert.equal - ~prn:(Format.sprintf "%ld") - ~msg:"checkpoint consistency: " - expected_checkpoint - (snd checkpoint) ; - Store.Chain.savepoint new_chain_store >>= fun savepoint -> - Store.Chain.caboose new_chain_store >>= fun caboose -> - match history_mode with - | History_mode.Archive -> - Assert.equal - ~prn:(Format.sprintf "%ld") - ~msg:"savepoint consistency: " - 0l - (snd savepoint) ; - Assert.equal - ~prn:(Format.sprintf "%ld") - ~msg:"caboose consistency: " - (snd savepoint) - (snd caboose) ; - assert_presence new_chain_store previously_baked_blocks history_mode - | Full _ -> - Assert.equal - ~prn:(Format.sprintf "%ld") - ~msg:"savepoint consistency: " - expected_checkpoint - (snd savepoint) ; - Assert.equal - ~prn:(Format.sprintf "%ld") - ~msg:"caboose consistency: " - 0l - (snd caboose) ; - assert_presence - new_chain_store - previously_baked_blocks - ~savepoint:(snd savepoint) - history_mode - | Rolling _ -> - Assert.equal - ~prn:(Format.sprintf "%ld") - ~msg:"savepoint consistency: " - expected_checkpoint - (snd savepoint) ; - Store.Block.get_block_metadata new_chain_store last >>=? fun metadata -> - let max_op_ttl = Store.Block.max_operations_ttl metadata in - let expected_caboose = - max 0l Int32.(add (sub expected_checkpoint (of_int max_op_ttl)) 0l) - in - Assert.equal - ~prn:(Format.sprintf "%ld") - ~msg:"caboose consistency: " - expected_caboose - (snd caboose) ; - assert_presence - new_chain_store - previously_baked_blocks - ~caboose:expected_caboose - ~savepoint:expected_checkpoint - history_mode - -let test_upgrade store (legacy_dir, (legacy_state : Legacy_state.t)) blocks = - let patch_context ctxt = Alpha_utils.default_patch_context ctxt in - let chain_store = Store.main_chain_store store in - let genesis = Store.Chain.genesis chain_store in - Lwt_utils_unix.create_dir legacy_dir >>= fun () -> - let chain_name = Distributed_db_version.Name.of_string "TEZOS" in - Legacy_state.Chain.get_exn legacy_state (Store.Chain.chain_id chain_store) - >>= fun legacy_chain -> - Lwt_list.map_p - (fun block -> - let hash = Store.Block.hash block in - Legacy_state.Block.known legacy_chain hash >>= fun known -> - Lwt.return (hash, known)) - blocks - >>= fun present_blocks_in_legacy -> - Legacy.upgrade_0_0_4 ~data_dir:legacy_dir ~patch_context ~chain_name genesis - >>=? fun _upgrade_message -> - let history_mode = Store.Chain.history_mode chain_store in - let open Filename.Infix in - Store.init - ~patch_context - ~history_mode - ~readonly:false - ~store_dir:(legacy_dir // "store") - ~context_dir:(legacy_dir // "context") - ~allow_testchains:true - genesis - >>=? fun upgraded_store -> - Lwt.finalize - (fun () -> - let upgraded_chain_store = Store.main_chain_store upgraded_store in - Lwt_list.iter_s - (fun (hash, is_known) -> - Store.Block.is_known upgraded_chain_store hash >>= fun is_known' -> - Assert.equal - ~msg: - (Format.asprintf - "check %a existence after upgrade" - Block_hash.pp - hash) - is_known - is_known' ; - Lwt.return_unit) - present_blocks_in_legacy - >>= fun () -> - check_flags upgraded_chain_store blocks history_mode >>=? fun () -> - Test_utils.check_invariants upgraded_chain_store >>=? fun () -> - (* Try baking a bit after upgrading... *) - Store.Chain.current_head upgraded_chain_store >>= fun head -> - Alpha_utils.bake_until_n_cycle_end upgraded_chain_store 10 head - >>=? 
fun _ -> return_unit) - (fun () -> Store.close_store upgraded_store) - -let test_legacy_snapshot legacy_snapshot_history_mode store - (legacy_dir, (legacy_state : Legacy_state.t)) blocks = - let patch_context ctxt = Alpha_utils.default_patch_context ctxt in - let chain_store = Store.main_chain_store store in - let genesis = Store.Chain.genesis chain_store in - Lwt_utils_unix.create_dir legacy_dir >>= fun () -> - let chain_name = Distributed_db_version.Name.of_string "TEZOS" in - Legacy_state.Chain.get_exn legacy_state (Store.Chain.chain_id chain_store) - >>= fun legacy_chain -> - Lwt_list.map_p - (fun block -> - let descr = Store.Block.descriptor block in - Legacy_state.Block.known legacy_chain (fst descr) >>= fun known -> - Lwt.return (descr, known)) - blocks - >>= fun present_blocks_in_legacy -> - Legacy_chain.head legacy_chain >>= fun legacy_head -> - let open Filename.Infix in - let snapshot_file = legacy_dir // "legacy_snapshot" in - let head_hash = Legacy_state.Block.hash legacy_head in - Legacy_snapshots.export - ~export_rolling:(legacy_snapshot_history_mode = History_mode.Legacy.Rolling) - ~store_root:(legacy_dir // "store") - ~context_root:(legacy_dir // "context") - ~genesis - snapshot_file - head_hash - >>=? fun () -> - let open Filename.Infix in - let root_dir = - Naming.dir_path (Store.directory store) // ".." // "imported_store" - in - let dst_store_dir = root_dir // "store" in - let dst_context_dir = legacy_dir // "context" in - Snapshots.import_legacy - ~patch_context - ~block:head_hash - ~dst_store_dir - ~dst_context_dir - ~chain_name - ~user_activated_upgrades:[] - ~user_activated_protocol_overrides:[] - ~snapshot_file - genesis - >>=? fun () -> - let history_mode = History_mode.convert legacy_snapshot_history_mode in - Store.init - ~patch_context - ~history_mode - ~readonly:false - ~store_dir:dst_store_dir - ~context_dir:dst_context_dir - ~allow_testchains:true - genesis - >>=? fun imported_store -> - let imported_chain_store = Store.main_chain_store imported_store in - Lwt.finalize - (fun () -> - Lwt.catch - (fun () -> - Lwt_list.iter_s - (fun ((hash, level), is_known) -> - Store.Block.is_known imported_chain_store hash - >>= fun is_known' -> - if is_known && not is_known' then ( - Store.Chain.caboose imported_chain_store - >>= fun (_, caboose_level) -> - Assert.is_true - ~msg:"check block absence consistency with history mode" - (match history_mode with - | Rolling _ -> caboose_level > level - | _ -> false) ; - Lwt.return_unit) - else ( - Assert.equal - ~msg: - (Format.asprintf - "check %a existence after upgrade" - Block_hash.pp - hash) - is_known - is_known' ; - Lwt.return_unit)) - present_blocks_in_legacy - >>= fun () -> - Test_utils.check_invariants imported_chain_store >>=? fun () -> - (* Try baking a bit after importing... *) - Store.Chain.current_head imported_chain_store - >>= fun head_after_import -> - Alpha_utils.bake_until_n_cycle_end - imported_chain_store - 10 - head_after_import - >>=? fun _ -> - let highest_cemented_block = - Cemented_block_store.get_highest_cemented_level - (Store.Unsafe.get_block_store imported_chain_store - |> Block_store.cemented_block_store) - in - match highest_cemented_block with - | None -> return_unit - | Some highest_cemented_level -> - Assert.is_true - ~msg:"is the highest cemented block above the new head" - Compare.Int32.( - highest_cemented_level > Store.Block.level head_after_import) ; - return_unit) - (fun e -> - Store.make_pp_store imported_store >>= fun pp -> - Format.printf "DEBUG-IMPORTED: %a@." 
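(* The membership check above tolerates exactly one discrepancy: in
   [Rolling] mode, blocks below the imported caboose are legitimately
   absent. That predicate as a standalone sketch:
   {[
     let block_presence_ok ~history_mode ~caboose_level ~level ~was_known
         ~is_known =
       if was_known && not is_known then
         (match history_mode with
         | History_mode.Rolling _ -> caboose_level > level
         | _ -> false)
       else Bool.equal was_known is_known
   ]} *)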
pp () ; - Lwt.fail e)) - (fun () -> Store.close_store imported_store) - -let test_upgrade_from_snapshot legacy_snapshot_history_mode store - (legacy_dir, (legacy_state : Legacy_state.t)) blocks = - let patch_context ctxt = Alpha_utils.default_patch_context ctxt in - let chain_store = Store.main_chain_store store in - let genesis = Store.Chain.genesis chain_store in - Lwt_utils_unix.create_dir legacy_dir >>= fun () -> - let chain_name = Distributed_db_version.Name.of_string "TEZOS" in - Legacy_state.Chain.get_exn legacy_state (Store.Chain.chain_id chain_store) - >>= fun legacy_chain -> - Lwt_list.map_p - (fun block -> - let descr = Store.Block.descriptor block in - Legacy_state.Block.known legacy_chain (fst descr) >>= fun known -> - Lwt.return (descr, known)) - blocks - >>= fun present_blocks_in_legacy -> - Legacy_chain.head legacy_chain >>= fun legacy_head -> - let open Filename.Infix in - let snapshot_file = legacy_dir // "legacy_snapshot" in - let head_hash = Legacy_state.Block.hash legacy_head in - Legacy_snapshots.export - ~export_rolling:(legacy_snapshot_history_mode = History_mode.Legacy.Rolling) - ~store_root:(legacy_dir // "store") - ~context_root:(legacy_dir // "context") - ~genesis - snapshot_file - head_hash - >>=? fun () -> - let imported_root_dir = - Naming.dir_path (Store.directory store) // ".." // ".." // "imported_store" - in - let imported_store_dir = imported_root_dir // "store" in - let imported_context_dir = imported_root_dir // "context" in - Lwt_unix.mkdir imported_root_dir 0o700 >>= fun () -> - Legacy_snapshots.import - ~patch_context - ~data_dir:imported_root_dir - ~user_activated_upgrades:[] - ~user_activated_protocol_overrides:[] - ~dir_cleaner:(fun _ -> Lwt.return_unit) - ~genesis - snapshot_file - ~block:(Some (Block_hash.to_b58check head_hash)) - >>=? fun () -> - Legacy_state.init - ~patch_context - ~store_root:imported_store_dir - ~context_root:imported_context_dir - genesis - >>=? fun (state, chain_state, _, _) -> - ( Legacy_state.Chain.checkpoint chain_state >|= fun bh -> - Some (Block_header.hash bh, bh.shell.level) ) - >>= fun expected_checkpoint -> - (Legacy_state.Chain.save_point chain_state >|= fun (l, bh) -> Some (bh, l)) - >>= fun expected_savepoint -> - (Legacy_state.Chain.caboose chain_state >|= fun (l, bh) -> Some (bh, l)) - >>= fun expected_caboose -> - Legacy_state.close state >>= fun () -> - Legacy.upgrade_0_0_4 - ~data_dir:imported_root_dir - ~patch_context - ~chain_name - genesis - >>=? fun _upgrade_message -> - let history_mode = Store.Chain.history_mode chain_store in - Store.init - ~patch_context - ~history_mode - ~readonly:false - ~store_dir:imported_store_dir - ~context_dir:imported_context_dir - ~allow_testchains:true - genesis - >>=? 
fun upgraded_store -> - let upgraded_chain_store = Store.main_chain_store upgraded_store in - Lwt.finalize - (fun () -> - Lwt_list.iter_s - (fun ((hash, level), is_known) -> - Store.Block.is_known upgraded_chain_store hash >>= fun is_known' -> - if is_known && not is_known' then ( - Store.Chain.caboose upgraded_chain_store - >>= fun (_, caboose_level) -> - Assert.is_true - ~msg:"check block absence consistency with history mode" - (match history_mode with - | Rolling _ -> caboose_level > level - | _ -> false) ; - Lwt.return_unit) - else ( - Assert.equal - ~msg: - (Format.asprintf - "check %a existence after upgrade" - Block_hash.pp - hash) - is_known - is_known' ; - Lwt.return_unit)) - present_blocks_in_legacy - >>= fun () -> - Test_utils.check_invariants - upgraded_chain_store - ~expected_checkpoint - ~expected_savepoint - ~expected_caboose - >>=? fun () -> - (* Try baking a bit after importing... *) - Store.Chain.current_head upgraded_chain_store >>= fun head -> - Alpha_utils.bake_until_n_cycle_end upgraded_chain_store 10 head - >>=? fun _ -> return_unit) - (fun () -> Store.close_store upgraded_store) - -(* This test aims to create a v1 snapshot (from the legacy store) - which does not contain the block and operations metadata hashes and - check that the reconstruction procedure of the new store manages to - restore the missing data. *) -let test_legacy_reconstruct legacy_snapshot_history_mode store - (legacy_dir, (legacy_state : Legacy_state.t)) _blocks = - let patch_context ctxt = Legacy_utils.patch_context ctxt in - let chain_store = Store.main_chain_store store in - let genesis = Store.Chain.genesis chain_store in - Lwt_utils_unix.create_dir legacy_dir >>= fun () -> - let chain_name = Distributed_db_version.Name.of_string "TEZOS" in - Legacy_state.Chain.get_exn legacy_state (Store.Chain.chain_id chain_store) - >>= fun legacy_chain -> - Legacy_chain.head legacy_chain >>= fun legacy_head -> - let open Filename.Infix in - let snapshot_file = legacy_dir // "legacy_snapshot" in - let head_hash = Legacy_state.Block.hash legacy_head in - Legacy_snapshots.export - ~export_rolling:(legacy_snapshot_history_mode = History_mode.Legacy.Rolling) - ~store_root:(legacy_dir // "store") - ~context_root:(legacy_dir // "context") - ~genesis - snapshot_file - head_hash - >>=? fun () -> - let open Filename.Infix in - let root_dir = - Naming.dir_path (Store.directory store) // ".." // "imported_store" - in - let dst_store_dir = root_dir // "store" in - let dst_context_dir = legacy_dir // "context" in - Snapshots.import_legacy - ~patch_context - ~block:head_hash - ~dst_store_dir - ~dst_context_dir - ~chain_name - ~user_activated_upgrades:[] - ~user_activated_protocol_overrides:[] - ~snapshot_file - genesis - >>=? fun () -> - Store.init - ~patch_context - ~history_mode:(History_mode.convert legacy_snapshot_history_mode) - ~readonly:false - ~store_dir:dst_store_dir - ~context_dir:dst_context_dir - ~allow_testchains:true - genesis - >>=? fun imported_store -> - let imported_chain_store = Store.main_chain_store imported_store in - (* Make sure that the imported blocks are missing the block metadata - hash.
Here, we target 2 blocks below the head as the head and its - predecessor aim to be complete (as the head was applied thanks - to its complete predecessor). *) - (Store.Block.read_block_opt imported_chain_store ~distance:2 head_hash - >>= function - | None -> - Alcotest.fail "A block is unexpectedly missing from the imported store" - | Some block -> - if Option.is_some (Store.Block.block_metadata_hash block) then - Alcotest.fail "Block metadata hash is available but should not be." - else (* Block metadata hash is missing, as expected *) return_unit) - >>=? fun () -> - Store.close_store imported_store >>= fun () -> - Reconstruction.reconstruct - ~patch_context - ~store_dir:dst_store_dir - ~context_dir:dst_context_dir - genesis - ~user_activated_upgrades:[] - ~user_activated_protocol_overrides:[] - >>=? fun () -> - (* Restart the store, after the reconstruction, in archive mode. *) - Store.init - ~patch_context - ~history_mode:History_mode.Archive - ~readonly:false - ~store_dir:dst_store_dir - ~context_dir:dst_context_dir - ~allow_testchains:true - genesis - >>=? fun reconstructed_store -> - let reconstructed_chain_store = Store.main_chain_store reconstructed_store in - (Store.Block.read_block_opt reconstructed_chain_store ~distance:2 head_hash - >>= function - | None -> - Alcotest.fail "A block is unexpectedly missing from the reconstructed store" - | Some block -> - if Option.is_none (Store.Block.block_metadata_hash block) then - Alcotest.fail "Block metadata hash is missing but should not be." - else (* Block metadata hash is available, as expected *) return_unit) - >>=? fun () -> return_unit - -let make_upgrade_test_cases ?(keep_dir = false) speed : - string Alcotest_lwt.test_case list = - let history_modes = - History_mode.[Legacy.Archive; Legacy.Full; Legacy.Rolling] - in - let nb_blocks_to_bake = - match speed with `Slow -> 0 -- 100 | `Quick -> [8; 57; 89; 101] - in - let permutations = List.(product nb_blocks_to_bake history_modes) in - List.map - (fun (nb_blocks_to_bake, legacy_history_mode) -> - let name = - Format.asprintf - "Upgrade legacy %a with %d blocks" - History_mode.Legacy.pp - legacy_history_mode - nb_blocks_to_bake - in - let test = - { - name; - speed; - legacy_history_mode; - nb_blocks = `Blocks nb_blocks_to_bake; - test = test_upgrade; - } - in - wrap_test_legacy ~keep_dir test) - permutations - -let make_legacy_snapshot_test_cases ~keep_dir speed = - let history_modes = - History_mode.[Legacy.Archive; Legacy.Full; Legacy.Rolling] - in - let snapshot_history_modes = History_mode.[Legacy.Full; Legacy.Rolling] in - let nb_blocks_to_bake = [40; 57; 89; 101] in - let permutations = - List.( - product nb_blocks_to_bake (product history_modes snapshot_history_modes)) - |> List.filter - (fun (_, (legacy_history_mode, legacy_snapshot_history_mode)) -> - if legacy_history_mode = History_mode.Legacy.Rolling then - legacy_snapshot_history_mode = History_mode.Legacy.Rolling - else true) - |> List.sort_uniq compare - in - List.map - (fun (nb_blocks_to_bake, (legacy_history_mode, legacy_snapshot_history_mode)) - -> - let name = - Format.asprintf - "Import legacy snapshot in %a from %a with %d blocks" - History_mode.Legacy.pp - legacy_snapshot_history_mode - History_mode.Legacy.pp - legacy_history_mode - nb_blocks_to_bake - in - let test = - { - name; - speed; - legacy_history_mode; - nb_blocks = `Blocks nb_blocks_to_bake; - test = test_legacy_snapshot legacy_snapshot_history_mode; - } - in - wrap_test_legacy ~keep_dir test) - permutations - -let
make_upgrade_after_snapshot_import_test_cases ?(keep_dir = false) speed : - string Alcotest_lwt.test_case list = - let history_modes = History_mode.[Legacy.Full; Legacy.Rolling] in - let nb_blocks_to_bake = - match speed with `Slow -> 1 -- 100 | `Quick -> [8; 57; 89; 101] - in - let permutations = List.(product nb_blocks_to_bake history_modes) in - List.map - (fun (nb_blocks_to_bake, legacy_history_mode) -> - let name = - Format.asprintf - "Upgrade legacy %a after snapshot import with %d blocks" - History_mode.Legacy.pp - legacy_history_mode - nb_blocks_to_bake - in - let test = - { - name; - speed; - legacy_history_mode; - nb_blocks = `Blocks nb_blocks_to_bake; - test = test_upgrade_from_snapshot legacy_history_mode; - } - in - wrap_test_legacy ~keep_dir test) - permutations - -let make_legacy_reconstruct_test_cases ?(keep_dir = false) speed : - string Alcotest_lwt.test_case list = - (* Make sure that we also reconstruct through both cemented and - floating stores. *) - let nb_blocks_to_bake = [8 * 2; 8 * 5; 8 * 8] in - let legacy_history_mode = History_mode.Legacy.Full in - List.map - (fun nb_blocks -> - let name = - Format.asprintf - "Storage reconstruction after a legacy snapshot import with %d blocks" - nb_blocks - in - let nb_blocks = `Blocks nb_blocks in - let test = - { - name; - speed; - legacy_history_mode; - nb_blocks; - test = test_legacy_reconstruct legacy_history_mode; - } - in - wrap_test_legacy ~keep_dir test) - nb_blocks_to_bake - -let upgrade_tests : string Alcotest_lwt.test list = - let speed = - try - let s = Sys.getenv "SLOW_TEST" in - match String.(trim (uncapitalize_ascii s)) with - | "true" | "1" | "yes" -> `Slow - | _ -> `Quick - with Not_found -> `Quick - in - let upgrade_cases = make_upgrade_test_cases ~keep_dir:false speed in - let snapshots_cases = make_legacy_snapshot_test_cases ~keep_dir:false speed in - let upgrade_snapshots_cases = - make_upgrade_after_snapshot_import_test_cases ~keep_dir:false speed - in - let legacy_reconstruct_cases = - make_legacy_reconstruct_test_cases ~keep_dir:false speed - in - [ - ("legacy store upgrade", upgrade_cases); - ("legacy snapshot import", snapshots_cases); - ("legacy store upgrade after snapshot import", upgrade_snapshots_cases); - ("storage reconstruction after a legacy import", legacy_reconstruct_cases); - ] - -let () = - let open Cmdliner in - let arg = - Arg.( - required - & opt (some string) None - & info ~docv:"[LEGACY_STORE_BUILDER_PATH]" ["builder-path"]) - in - Lwt_main.run - ( Internal_event_unix.init () >>= fun () -> - Alcotest_lwt.run_with_args "tezos-store-legacy" arg upgrade_tests ) diff --git a/src/lib_store/tezos-store.opam b/src/lib_store/tezos-store.opam index a0af6fffae6f..2f740d720480 100644 --- a/src/lib_store/tezos-store.opam +++ b/src/lib_store/tezos-store.opam @@ -13,7 +13,6 @@ depends: [ "irmin-pack" { >= "2.8.0" } "tezos-stdlib-unix" "tezos-context" - "tezos-legacy-store" "tezos-validation" "tezos-protocol-updater" "lwt-watcher" { = "0.1" } diff --git a/tests_python/conftest.py b/tests_python/conftest.py index f3f7e95ad85c..debcf925f042 100644 --- a/tests_python/conftest.py +++ b/tests_python/conftest.py @@ -5,10 +5,7 @@ E.g. start and stop a server. The fixture is simply specified as a parameter in the test function, and the yielded value is then accessible with this parameter.
""" -import subprocess -import shutil import os -import tempfile from typing import Optional, Iterator import pytest @@ -202,80 +199,3 @@ def pytest_addoption(parser: _pytest.config.argparsing.Parser) -> None: help="the node validates blocks using only one process,\ useful for debugging", ) - - -@pytest.fixture(scope="class") -def legacy_stores(request): - """Aims to generate legacy stores. - - The number of blocks to bake (batch), the home path and the - export_snapshots variables are specified as a class annotation. - @pytest.mark.parametrize('legacy_stores', […], indirect=True) - """ - assert request.param is not None - home = request.param['home'] - batch = request.param['batch'] - export_snapshots = request.param['snapshot'] - session = {} - data_dir = tempfile.mkdtemp(prefix='tezos-legacy-stores.') - build_dir = '_build/' - builder_target = 'legacy_store_builder' - builder_path = f'src/lib_store/legacy_store/{builder_target}.exe' - builder_bin = f'{build_dir}default/{builder_path}' - maker_target = 'legacy_store_maker' - maker_path = f'src/lib_store/test/{maker_target}.exe' - maker_bin = f'{build_dir}default/{maker_path}' - subprocess.run( - [ - 'dune', - 'build', - '--build-dir', - f'{home}{build_dir}', - f'{builder_path}', - ], - check=True, - cwd=home, - ) - subprocess.run( - ['dune', 'build', '--build-dir', f'{home}{build_dir}', f'{maker_path}'], - check=True, - cwd=home, - ) - # Call the magic binary which generates legacy stores such as: - # data_dir/archive_store_to_upgrade - # /full_store_to_upgrade - # /rolling_store_to_upgrade - # where every store contains the "same chain" - subprocess.run( - [ - maker_bin, - data_dir, - builder_bin, - str(batch), - str(export_snapshots).lower(), - ], - check=True, - cwd=home, - ) - - # Store data paths in session - for history_mode in ['archive', 'full', 'rolling']: - path = f'{data_dir}/{history_mode}_store_to_upgrade' - session[f'{history_mode}_path'] = path - - # Store snapshot paths in legacy_stores - if export_snapshots: - for history_mode in ['archive', 'full']: - full_path = f'{data_dir}/snapshot_from_{history_mode}_storage.full' - session[f'from_{history_mode}.full'] = full_path - rolling_path = ( - f'{data_dir}' + f'/snapshot_from_{history_mode}_storage.rolling' - ) - session[f'from_{history_mode}.rolling'] = rolling_path - # Store the rolling path - session['from_rolling.rolling'] = ( - f'{data_dir}/snapshot_from' + '_rolling_storage.rolling' - ) - - yield session - shutil.rmtree(data_dir) diff --git a/tests_python/tests_011/conftest.py b/tests_python/tests_011/conftest.py index 19b68d81c525..06a327f70792 100644 --- a/tests_python/tests_011/conftest.py +++ b/tests_python/tests_011/conftest.py @@ -1,7 +1,6 @@ """Protocol-specific hooks and fixtures""" import tempfile -import shutil from typing import Optional, Iterator, List import pytest from launchers.sandbox import Sandbox @@ -109,38 +108,3 @@ def mockup_client(sandbox: Sandbox) -> Iterator[Client]: ).create_mockup_result assert res == CreateMockupResult.OK yield sandbox.create_client(base_dir=base_dir, mode="mockup") - - -@pytest.fixture(scope="class") -def nodes_legacy_store(sandbox, legacy_stores): - nodes = {} - - # TODO would be cleaner to return couples (node, client) in order to - # avoid relying on the invariant that nodes are numbered 1, 2, 3 - # or just return the id?
- i = 1 - for history_mode in ['archive', 'full', 'rolling']: - node_dir = legacy_stores[f'{history_mode}_path'] - # init config with up to date version - params = constants.NODE_PARAMS + ['--history-mode', history_mode] - node = sandbox.register_node(i, node_dir=node_dir, params=params) - # Workaround to allow generating an identity on an - # old 0.0.4 storage with a 0.0.6 node - version = open(node_dir + "/version.json", "w") - version.write('{ "version": "0.0.6" }') - version.close() - node.init_config() - # write version to upgrade - version = open(node_dir + "/version.json", "w") - version.write('{ "version": "0.0.4" }') - version.close() - - nodes[history_mode] = node - i += 1 - - yield nodes - - # TODO think of case of failure before `yield` - for history_mode in ['archive', 'full', 'rolling']: - node_dir = legacy_stores[f'{history_mode}_path'] - shutil.rmtree(node_dir) diff --git a/tests_python/tests_011/test_legacy_snapshots.py b/tests_python/tests_011/test_legacy_snapshots.py deleted file mode 100644 index 5685ea360ef0..000000000000 --- a/tests_python/tests_011/test_legacy_snapshots.py +++ /dev/null @@ -1,353 +0,0 @@ -import shutil -import pytest -from tools import utils, paths, constants - -PARAMS = constants.NODE_PARAMS - -# Defines the number of blocks to bake in the following test. This -# constant should be higher than max_op_ttl and should be a multiple -# of the cycle length (8 in sandboxed mode) -BATCH = 160 - -HOME = paths.TEZOS_HOME - -GROUP = [1, 2, 3, 4, 5] - -EXPECTED_LEVEL = BATCH + 1 - - -def check_expected_values(head): - assert head['header']['level'] == EXPECTED_LEVEL - - -def restart(sandbox, node_id): - sandbox.node(node_id).terminate_or_kill() - sandbox.node(node_id).run() - assert sandbox.client(node_id).check_node_listening() - - -def expect_wrong_version(sandbox, node): - pattern = 'Found \'0.0.4\', expected \'0.0.6\'' - with utils.assert_run_failure(pattern): - sandbox.init_node(node, snapshot=None, reconstruct=False) - - -def remove_lmdb(node): - shutil.rmtree(node.node_dir + '/lmdb_store_to_remove') - - -def clean(node): - shutil.rmtree(node.node_dir) - - -MAP = { - "batch": BATCH, - "home": paths.TEZOS_HOME, - "snapshot": True, -} - - -@pytest.mark.incremental -@pytest.mark.snapshot -@pytest.mark.slow -@pytest.mark.parametrize('legacy_stores', [MAP], indirect=True) -class TestLegacy: - - # Generate legacy stores and export all kind of snapshots - def test_generate_legacy_stores(self, legacy_stores, session): - # Store snapshot paths in session - for history_mode in ['archive', 'full']: - path_full = legacy_stores[f'from_{history_mode}.full'] - session[f'from_{history_mode}.full'] = path_full - path_rolling = legacy_stores[f'from_{history_mode}.rolling'] - session[f'from_{history_mode}.rolling'] = path_rolling - # Store the rolling path - tmp = legacy_stores['from_rolling.rolling'] - session['from_rolling.rolling'] = tmp - session['head_level'] = EXPECTED_LEVEL - - ########################################################################### - # Import all kinds of snapshots - # New node: 3 - def test_run_full_node_from_archive_1(self, sandbox, legacy_stores): - file = legacy_stores['from_archive.full'] - sandbox.add_node(3, snapshot=file, params=constants.NODE_PARAMS) - - # New node: 4 - def test_run_rolling_node_from_archive_1(self, sandbox, legacy_stores): - file = legacy_stores['from_archive.rolling'] - sandbox.add_node(4, snapshot=file, params=constants.NODE_PARAMS) - - # New node 1 - def test_reset_full_node_from_full_1(self, sandbox, 
legacy_stores): - file = legacy_stores['from_full.full'] - sandbox.add_node(1, snapshot=file, params=constants.NODE_PARAMS) - - # New node: 5 - def test_run_rolling_node_from_full_1(self, sandbox, legacy_stores): - file = legacy_stores['from_full.rolling'] - sandbox.add_node(5, snapshot=file, params=constants.NODE_PARAMS) - - # New node 2 - def test_reset_rolling_node_from_rolling_1(self, sandbox, legacy_stores): - file = legacy_stores['from_rolling.rolling'] - sandbox.add_node(2, snapshot=file, params=constants.NODE_PARAMS) - - ########################################################################### - # Check consistency of imported snapshots with > 5 cycles - - # For the full nodes - def test_node_1_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 1 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - expected_savepoint = expected_checkpoint - expected_caboose = 0 - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.full_node_blocks_availability( - node_id, sandbox, expected_savepoint, expected_level - ) - - def test_node_3_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 3 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - expected_savepoint = expected_checkpoint - expected_caboose = 0 - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.full_node_blocks_availability( - node_id, sandbox, expected_savepoint, expected_level - ) - - # For the rolling nodes - def test_node_2_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 2 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - expected_caboose = expected_checkpoint - max_op_ttl - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - def test_node_4_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 4 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - expected_caboose = expected_checkpoint - max_op_ttl - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - def test_node_5_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 5 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - 
expected_savepoint = expected_checkpoint - expected_caboose = expected_checkpoint - max_op_ttl - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - # Bake a few blocks to check if the Full and Rolling nodes catch up - def test_bake_to_catch_up(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - for _ in range(BATCH): - utils.bake(sandbox.client(1)) - session['head_level'] = sandbox.client(1).get_head()['header']['level'] - for i in GROUP: - assert utils.check_level(sandbox.client(i), session['head_level']) - - ########################################################################### - # Check consistency of imported snapshots with > 5 cycles - - # For the full nodes - def test_node_1_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 1 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = 0 - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.full_node_blocks_availability( - node_id, sandbox, expected_savepoint, expected_level - ) - - def test_node_3_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 3 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = 0 - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.full_node_blocks_availability( - node_id, sandbox, expected_savepoint, expected_level - ) - - # For the rolling nodes - def test_node_2_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 2 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = expected_savepoint - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - def test_node_4_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 4 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = expected_savepoint - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, 
- expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - def test_node_5_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 5 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = expected_savepoint - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) diff --git a/tests_python/tests_011/test_legacy_upgrade.py b/tests_python/tests_011/test_legacy_upgrade.py deleted file mode 100644 index 5231afb17087..000000000000 --- a/tests_python/tests_011/test_legacy_upgrade.py +++ /dev/null @@ -1,212 +0,0 @@ -import pytest -from tools import utils, paths - -# Defines the number of blocks to bake in the following test. This -# constant should be higher than max_op_ttl and should be a multiple of -# the cycle length (8 in sandboxed mode) -BATCH = 160 - -EXPECTED_LEVEL = BATCH + 1 - -# FIXME: How to get this value? -MAX_OP_TTL = 120 - -# checkpoint = lafl(head) -EXPECTED_CHECKPOINT = EXPECTED_LEVEL - 16 -# savepoint = checkpoint (legacy's Full limitations) -EXPECTED_SAVEPOINT = EXPECTED_LEVEL - 16 -EXPECTED_CABOOSE = 0 -# savepoint - max_op_ttl(cp) -EXPECTED_ROLLING_CABOOSE = EXPECTED_SAVEPOINT - MAX_OP_TTL - - -def check_expected_values(head): - assert head['header']['level'] == EXPECTED_LEVEL - - -def restart(sandbox, node_id): - sandbox.node(node_id).run() - assert sandbox.client(node_id).check_node_listening() - - -def expect_wrong_version(sandbox, node): - pattern = "Found '0.0.4', expected '0.0.6'" - with utils.assert_run_failure(pattern): - sandbox.init_node(node, snapshot=None, reconstruct=False) - - -MAP = { - "batch": BATCH, - "home": paths.TEZOS_HOME, - "snapshot": False, -} - - -@pytest.mark.incremental -@pytest.mark.snapshot -@pytest.mark.slow -@pytest.mark.parametrize("legacy_stores", [MAP], indirect=True) -class TestLegacy: - - # ARCHIVE - def test_upgrade_archive(self, sandbox, nodes_legacy_store): - node1 = nodes_legacy_store['archive'] - # We init the client - client1 = sandbox.register_client(1, rpc_port=node1.rpc_port) - expect_wrong_version(sandbox, node1) - # We now run the storage upgrade - sandbox.node(1).upgrade_storage() - # After upgrading, we restart the node - restart(sandbox, 1) - sandbox.init_client(client1) - - # Checkpoints - def test_archive_consistency_1(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - check_expected_values(sandbox.client(1).get_head()) - assert sandbox.client(1).get_savepoint() == 0 - assert sandbox.client(1).get_caboose() == 0 - - # All blocks must be available - def test_archive_consistency_2(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_LEVEL): - assert utils.get_block_at_level(sandbox.client(1), i) - - # FULL - def test_upgrade_full(self, sandbox, nodes_legacy_store): - node2 = nodes_legacy_store['full'] - # We init the client - client2 = sandbox.register_client(2, rpc_port=node2.rpc_port) -
expect_wrong_version(sandbox, node2) - # We now run the storage upgrade - sandbox.node(2).upgrade_storage() - # After upgrading, we restart the node - restart(sandbox, 2) - sandbox.init_client(client2) - - # Checkpoints - def test_full_consistency_1(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - check_expected_values(sandbox.client(2).get_head()) - savepoint = sandbox.client(2).get_savepoint() - assert savepoint == EXPECTED_SAVEPOINT - caboose = sandbox.client(2).get_caboose() - assert caboose == 0 - # the metadata of genesis are available - assert utils.get_block_at_level(sandbox.client(2), 0) - - # All blocks in [1; CHECKPOINT] must not be available (only headers are) - def test_full_consistency_2(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(1, EXPECTED_CHECKPOINT): - utils.get_block_metadata_at_level( - sandbox.client(2), i, expect_failure=True - ) - - # All block headers in [1; CHECKPOINT] must be available - def test_full_consistency_3(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(1, EXPECTED_CHECKPOINT): - utils.get_block_header_at_level(sandbox.client(2), i) - - # All blocks in [CHECKPOINT + 1; HEAD] must be available - def test_full_consistency_4(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_CHECKPOINT + 1, EXPECTED_LEVEL): - assert utils.get_block_at_level(sandbox.client(2), i) - - # ROLLING - def test_upgrade_rolling(self, sandbox, nodes_legacy_store): - node3 = nodes_legacy_store['rolling'] - # We init the client - client3 = sandbox.register_client(3, rpc_port=node3.rpc_port) - expect_wrong_version(sandbox, node3) - # We now run the storage upgrade - sandbox.node(3).upgrade_storage() - # After upgrading, we restart the node - restart(sandbox, 3) - sandbox.init_client(client3) - - # Checkpoints - def test_rolling_consistency_1(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - check_expected_values(sandbox.client(3).get_head()) - savepoint = sandbox.client(3).get_savepoint() - assert savepoint == EXPECTED_CHECKPOINT - # In rolling, caboose = savepoint - caboose = sandbox.client(3).get_caboose() - assert caboose == EXPECTED_ROLLING_CABOOSE - # the metadata of genesis are available - utils.get_block_at_level(sandbox.client(3), 0) - - # All blocks in [1 ; ROLLING_CABOOSE] must not be known - def test_rolling_consistency_2(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(1, EXPECTED_ROLLING_CABOOSE): - utils.get_block_at_level(sandbox.client(3), i, expect_failure=True) - - # All blocks in [ROLLING_CABOOSE ; CHECKPOINT] must not be available - # (only headers are) - def test_rolling_consistency_3(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_ROLLING_CABOOSE, EXPECTED_CHECKPOINT): - utils.get_block_metadata_at_level( - sandbox.client(3), i, expect_failure=True - ) - - # All block headers in [SAVEPOINT ; CHECKPOINT] must be available - def test_rolling_consistency_4(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_SAVEPOINT, EXPECTED_CHECKPOINT): - utils.get_block_header_at_level(sandbox.client(3), i) - - # All blocks in [CHECKPOINT + 1; HEAD] must be available - def test_rolling_consistency_5(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_CHECKPOINT + 1, EXPECTED_LEVEL): - assert
utils.get_block_at_level(sandbox.client(3), i) - - # Bake a few blocks to check if the Full and Rolling nodes catch up - def test_bake_to_catch_up(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for _ in range(BATCH): - utils.bake(sandbox.client(1)) - - def test_archive_catch_up(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - head = sandbox.client(1).get_head() - expected_head = EXPECTED_LEVEL + BATCH - assert head['header']['level'] == expected_head - checkpoint = (sandbox.client(1).get_checkpoint())['block']['level'] - assert checkpoint == (expected_head - 2 * 8) - savepoint = sandbox.client(1).get_savepoint() - caboose = sandbox.client(1).get_caboose() - assert savepoint == caboose - assert caboose == 0 - - # We assume that "Full 0 mode" is now in "Full 5 mode" - def test_full_catch_up(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - head = sandbox.client(2).get_head() - expected_head = EXPECTED_LEVEL + BATCH - assert head['header']['level'] == expected_head - checkpoint = sandbox.client(2).get_checkpoint()['block']['level'] - assert checkpoint == (expected_head - 2 * 8) - savepoint = sandbox.client(2).get_savepoint() - assert savepoint == (checkpoint - MAX_OP_TTL) - caboose = sandbox.client(2).get_caboose() - assert caboose == 0 - - # We assume that "Rolling 0 mode" is now in "Rolling 5 mode" - def test_rolling_catch_up(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - head = sandbox.client(3).get_head() - expected_head = EXPECTED_LEVEL + BATCH - assert head['header']['level'] == expected_head - checkpoint = sandbox.client(3).get_checkpoint()['block']['level'] - assert checkpoint == (expected_head - 2 * 8) - savepoint = sandbox.client(3).get_savepoint() - assert savepoint == (checkpoint - MAX_OP_TTL) - caboose = sandbox.client(3).get_caboose() - assert caboose == savepoint diff --git a/tests_python/tests_alpha/conftest.py b/tests_python/tests_alpha/conftest.py index e0946c2d5396..bd7ea62b7033 100644 --- a/tests_python/tests_alpha/conftest.py +++ b/tests_python/tests_alpha/conftest.py @@ -1,7 +1,6 @@ """Protocol-specific hooks and fixtures""" import tempfile -import shutil from typing import Optional, Iterator, List import pytest from launchers.sandbox import Sandbox @@ -120,38 +119,3 @@ def mockup_client(sandbox: Sandbox) -> Iterator[Client]: ).create_mockup_result assert res == CreateMockupResult.OK yield sandbox.create_client(base_dir=base_dir, mode="mockup") - - -@pytest.fixture(scope="class") -def nodes_legacy_store(sandbox, legacy_stores): - nodes = {} - - # TODO would be cleaner to return couples (node, client) in order to - # avoid relying on the invariant that nodes are numbered 1, 2, 3 - # or just return the id? 
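# Worked example (a sketch) of the offsets asserted by the deleted
# legacy-upgrade tests above: with BATCH = 160 baked blocks, a sandboxed
# cycle length of 8, and the MAX_OP_TTL of 120 hard-coded in those tests:
#
#   EXPECTED_LEVEL = 160 + 1              # 161
#   EXPECTED_CHECKPOINT = 161 - 2 * 8     # lafl(head) = 145
#   EXPECTED_SAVEPOINT = 145              # legacy Full keeps savepoint = checkpoint
#   EXPECTED_ROLLING_CABOOSE = 145 - 120  # 25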
- i = 1 - for history_mode in ['archive', 'full', 'rolling']: - node_dir = legacy_stores[f'{history_mode}_path'] - # init config with up to date version - params = constants.NODE_PARAMS + ['--history-mode', history_mode] - node = sandbox.register_node(i, node_dir=node_dir, params=params) - # Workaround to allow generating an identity on an - # old 0.0.4 storage with a 0.0.6 node - version = open(node_dir + "/version.json", "w") - version.write('{ "version": "0.0.6" }') - version.close() - node.init_config() - # write version to upgrade - version = open(node_dir + "/version.json", "w") - version.write('{ "version": "0.0.4" }') - version.close() - - nodes[history_mode] = node - i += 1 - - yield nodes - - # TODO think of case of failure before `yield` - for history_mode in ['archive', 'full', 'rolling']: - node_dir = legacy_stores[f'{history_mode}_path'] - shutil.rmtree(node_dir) diff --git a/tests_python/tests_alpha/test_legacy_snapshots.py b/tests_python/tests_alpha/test_legacy_snapshots.py deleted file mode 100644 index 5685ea360ef0..000000000000 --- a/tests_python/tests_alpha/test_legacy_snapshots.py +++ /dev/null @@ -1,353 +0,0 @@ -import shutil -import pytest -from tools import utils, paths, constants - -PARAMS = constants.NODE_PARAMS - -# Defines the number of blocks to bake in the following test. This -# constant should be higher than max_op_ttl and should be a multiple -# of the cycle length (8 in sandboxed mode) -BATCH = 160 - -HOME = paths.TEZOS_HOME - -GROUP = [1, 2, 3, 4, 5] - -EXPECTED_LEVEL = BATCH + 1 - - -def check_expected_values(head): - assert head['header']['level'] == EXPECTED_LEVEL - - -def restart(sandbox, node_id): - sandbox.node(node_id).terminate_or_kill() - sandbox.node(node_id).run() - assert sandbox.client(node_id).check_node_listening() - - -def expect_wrong_version(sandbox, node): - pattern = 'Found \'0.0.4\', expected \'0.0.6\'' - with utils.assert_run_failure(pattern): - sandbox.init_node(node, snapshot=None, reconstruct=False) - - -def remove_lmdb(node): - shutil.rmtree(node.node_dir + '/lmdb_store_to_remove') - - -def clean(node): - shutil.rmtree(node.node_dir) - - -MAP = { - "batch": BATCH, - "home": paths.TEZOS_HOME, - "snapshot": True, -} - - -@pytest.mark.incremental -@pytest.mark.snapshot -@pytest.mark.slow -@pytest.mark.parametrize('legacy_stores', [MAP], indirect=True) -class TestLegacy: - - # Generate legacy stores and export all kind of snapshots - def test_generate_legacy_stores(self, legacy_stores, session): - # Store snapshot paths in session - for history_mode in ['archive', 'full']: - path_full = legacy_stores[f'from_{history_mode}.full'] - session[f'from_{history_mode}.full'] = path_full - path_rolling = legacy_stores[f'from_{history_mode}.rolling'] - session[f'from_{history_mode}.rolling'] = path_rolling - # Store the rolling path - tmp = legacy_stores['from_rolling.rolling'] - session['from_rolling.rolling'] = tmp - session['head_level'] = EXPECTED_LEVEL - - ########################################################################### - # Import all kinds of snapshots - # New node: 3 - def test_run_full_node_from_archive_1(self, sandbox, legacy_stores): - file = legacy_stores['from_archive.full'] - sandbox.add_node(3, snapshot=file, params=constants.NODE_PARAMS) - - # New node: 4 - def test_run_rolling_node_from_archive_1(self, sandbox, legacy_stores): - file = legacy_stores['from_archive.rolling'] - sandbox.add_node(4, snapshot=file, params=constants.NODE_PARAMS) - - # New node 1 - def test_reset_full_node_from_full_1(self, sandbox, 
legacy_stores): - file = legacy_stores['from_full.full'] - sandbox.add_node(1, snapshot=file, params=constants.NODE_PARAMS) - - # New node: 5 - def test_run_rolling_node_from_full_1(self, sandbox, legacy_stores): - file = legacy_stores['from_full.rolling'] - sandbox.add_node(5, snapshot=file, params=constants.NODE_PARAMS) - - # New node 2 - def test_reset_rolling_node_from_rolling_1(self, sandbox, legacy_stores): - file = legacy_stores['from_rolling.rolling'] - sandbox.add_node(2, snapshot=file, params=constants.NODE_PARAMS) - - ########################################################################### - # Check consistency of imported snapshots with > 5 cycles - - # For the full nodes - def test_node_1_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 1 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - expected_savepoint = expected_checkpoint - expected_caboose = 0 - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.full_node_blocks_availability( - node_id, sandbox, expected_savepoint, expected_level - ) - - def test_node_3_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 3 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - expected_savepoint = expected_checkpoint - expected_caboose = 0 - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.full_node_blocks_availability( - node_id, sandbox, expected_savepoint, expected_level - ) - - # For the rolling nodes - def test_node_2_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 2 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - expected_caboose = expected_checkpoint - max_op_ttl - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - def test_node_4_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 4 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - expected_caboose = expected_checkpoint - max_op_ttl - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - def test_node_5_consistency_1(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 5 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - 
expected_savepoint = expected_checkpoint - expected_caboose = expected_checkpoint - max_op_ttl - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - # Bake a few blocks to check if the Full and Rolling nodes catch up - def test_bake_to_catch_up(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - for _ in range(BATCH): - utils.bake(sandbox.client(1)) - session['head_level'] = sandbox.client(1).get_head()['header']['level'] - for i in GROUP: - assert utils.check_level(sandbox.client(i), session['head_level']) - - ########################################################################### - # Check consistency of imported snapshots with > 5 cycles - - # For the full nodes - def test_node_1_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 1 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = 0 - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.full_node_blocks_availability( - node_id, sandbox, expected_savepoint, expected_level - ) - - def test_node_3_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 3 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = 0 - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.full_node_blocks_availability( - node_id, sandbox, expected_savepoint, expected_level - ) - - # For the rolling nodes - def test_node_2_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 2 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = expected_savepoint - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - def test_node_4_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 4 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = expected_savepoint - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, 
- expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) - - def test_node_5_consistency_2(self, sandbox, session, legacy_stores): - # pylint: disable=unused-argument - node_id = 5 - restart(sandbox, node_id) - expected_level = session['head_level'] - expected_checkpoint = expected_level - 2 * 8 # lafl(head) - head = sandbox.client(node_id).get_head() - max_op_ttl = head['metadata']['max_operations_ttl'] - expected_savepoint = expected_checkpoint - max_op_ttl - expected_caboose = expected_savepoint - utils.node_consistency_after_import( - node_id, - sandbox, - expected_level, - expected_checkpoint, - expected_savepoint, - expected_caboose, - ) - utils.rolling_node_blocks_availability( - node_id, - sandbox, - expected_savepoint, - expected_caboose, - expected_level, - ) diff --git a/tests_python/tests_alpha/test_legacy_upgrade.py b/tests_python/tests_alpha/test_legacy_upgrade.py deleted file mode 100644 index 5231afb17087..000000000000 --- a/tests_python/tests_alpha/test_legacy_upgrade.py +++ /dev/null @@ -1,212 +0,0 @@ -import pytest -from tools import utils, paths - -# Defines the number of blocks to bake in the following test. This -# constant should be higher than max_op_ttl and should be a multiple of -# the cycle length (8 in sandboxed mode) -BATCH = 160 - -EXPECTED_LEVEL = BATCH + 1 - -# FIXME: How to get this value? -MAX_OP_TTL = 120 - -# checkpoint = lafl(head) -EXPECTED_CHECKPOINT = EXPECTED_LEVEL - 16 -# savepoint = checkpoint (legacy's Full limitations) -EXPECTED_SAVEPOINT = EXPECTED_LEVEL - 16 -EXPECTED_CABOOSE = 0 -# savepoint - max_op_ttl(cp) -EXPECTED_ROLLING_CABOOSE = EXPECTED_SAVEPOINT - MAX_OP_TTL - - -def check_expected_values(head): - assert head['header']['level'] == EXPECTED_LEVEL - - -def restart(sandbox, node_id): - sandbox.node(node_id).run() - assert sandbox.client(node_id).check_node_listening() - - -def expect_wrong_version(sandbox, node): - pattern = "Found '0.0.4', expected '0.0.6'" - with utils.assert_run_failure(pattern): - sandbox.init_node(node, snapshot=None, reconstruct=False) - - -MAP = { - "batch": BATCH, - "home": paths.TEZOS_HOME, - "snapshot": False, -} - - -@pytest.mark.incremental -@pytest.mark.snapshot -@pytest.mark.slow -@pytest.mark.parametrize("legacy_stores", [MAP], indirect=True) -class TestLegacy: - - # ARCHIVE - def test_upgrade_archive(self, sandbox, nodes_legacy_store): - node1 = nodes_legacy_store['archive'] - # We init the client - client1 = sandbox.register_client(1, rpc_port=node1.rpc_port) - expect_wrong_version(sandbox, node1) - # We now run the storage upgrade - sandbox.node(1).upgrade_storage() - # After upgrading, we restart the node - restart(sandbox, 1) - sandbox.init_client(client1) - - # Checkpoints - def test_archive_consistency_1(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - check_expected_values(sandbox.client(1).get_head()) - assert sandbox.client(1).get_savepoint() == 0 - assert sandbox.client(1).get_caboose() == 0 - - # All blocks must be available - def test_archive_consistency_2(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_LEVEL): - assert utils.get_block_at_level(sandbox.client(1), i) - - # FULL - def test_upgrade_full(self, sandbox, nodes_legacy_store): - node2 = nodes_legacy_store['full'] - # We init the client - client2 = sandbox.register_client(2, rpc_port=node2.rpc_port) -
expect_wrong_version(sandbox, node2) - # We now run the storage upgrade - sandbox.node(2).upgrade_storage() - # After upgrading, we restart the node - restart(sandbox, 2) - sandbox.init_client(client2) - - # Checkpoints - def test_full_consistency_1(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - check_expected_values(sandbox.client(2).get_head()) - savepoint = sandbox.client(2).get_savepoint() - assert savepoint == EXPECTED_SAVEPOINT - caboose = sandbox.client(2).get_caboose() - assert caboose == 0 - # the metadata of genesis are available - assert utils.get_block_at_level(sandbox.client(2), 0) - - # All blocks in [1; CHECKPOINT] must not be available (only headers are) - def test_full_consistency_2(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(1, EXPECTED_CHECKPOINT): - utils.get_block_metadata_at_level( - sandbox.client(2), i, expect_failure=True - ) - - # All block headers in [1; CHECKPOINT] must be available - def test_full_consistency_3(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(1, EXPECTED_CHECKPOINT): - utils.get_block_header_at_level(sandbox.client(2), i) - - # All blocks in [CHECKPOINT + 1; HEAD] must be available - def test_full_consistency_4(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_CHECKPOINT + 1, EXPECTED_LEVEL): - assert utils.get_block_at_level(sandbox.client(2), i) - - # ROLLING - def test_upgrade_rolling(self, sandbox, nodes_legacy_store): - node3 = nodes_legacy_store['rolling'] - # We init the client - client3 = sandbox.register_client(3, rpc_port=node3.rpc_port) - expect_wrong_version(sandbox, node3) - # We now run the storage upgrade - sandbox.node(3).upgrade_storage() - # After upgrading, we restart the node - restart(sandbox, 3) - sandbox.init_client(client3) - - # Checkpoints - def test_rolling_consistency_1(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - check_expected_values(sandbox.client(3).get_head()) - savepoint = sandbox.client(3).get_savepoint() - assert savepoint == EXPECTED_CHECKPOINT - # In rolling, caboose = savepoint - caboose = sandbox.client(3).get_caboose() - assert caboose == EXPECTED_ROLLING_CABOOSE - # the metadata of genesis are available - utils.get_block_at_level(sandbox.client(3), 0) - - # All blocks in [1 ; ROLLING_CABOOSE] must not be known - def test_rolling_consistency_2(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(1, EXPECTED_ROLLING_CABOOSE): - utils.get_block_at_level(sandbox.client(3), i, expect_failure=True) - - # All blocks in [ROLLING_CABOOSE ; CHECKPOINT] must not be available - # (only headers are) - def test_rolling_consistency_3(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_ROLLING_CABOOSE, EXPECTED_CHECKPOINT): - utils.get_block_metadata_at_level( - sandbox.client(3), i, expect_failure=True - ) - - # All block headers in [SAVEPOINT ; CHECKPOINT] must be available - def test_rolling_consistency_4(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_SAVEPOINT, EXPECTED_CHECKPOINT): - utils.get_block_header_at_level(sandbox.client(3), i) - - # All blocks in [CHECKPOINT + 1; HEAD] must be available - def test_rolling_consistency_5(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for i in range(EXPECTED_CHECKPOINT + 1, EXPECTED_LEVEL): - assert
utils.get_block_at_level(sandbox.client(3), i) - - # Bake a few blocks to check if the Full and Rolling nodes catch up - def test_bake_to_catch_up(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - for _ in range(BATCH): - utils.bake(sandbox.client(1)) - - def test_archive_catch_up(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - head = sandbox.client(1).get_head() - expected_head = EXPECTED_LEVEL + BATCH - assert head['header']['level'] == expected_head - checkpoint = (sandbox.client(1).get_checkpoint())['block']['level'] - assert checkpoint == (expected_head - 2 * 8) - savepoint = sandbox.client(1).get_savepoint() - caboose = sandbox.client(1).get_caboose() - assert savepoint == caboose - assert caboose == 0 - - # We assume that "Full 0 mode" is now in "Full 5 mode" - def test_full_catch_up(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - head = sandbox.client(2).get_head() - expected_head = EXPECTED_LEVEL + BATCH - assert head['header']['level'] == expected_head - checkpoint = sandbox.client(2).get_checkpoint()['block']['level'] - assert checkpoint == (expected_head - 2 * 8) - savepoint = sandbox.client(2).get_savepoint() - assert savepoint == (checkpoint - MAX_OP_TTL) - caboose = sandbox.client(2).get_caboose() - assert caboose == 0 - - # We assume that "Rolling 0 mode" is now in "Rolling 5 mode" - def test_rolling_catch_up(self, sandbox, nodes_legacy_store): - # pylint: disable=unused-argument - head = sandbox.client(3).get_head() - expected_head = EXPECTED_LEVEL + BATCH - assert head['header']['level'] == expected_head - checkpoint = sandbox.client(3).get_checkpoint()['block']['level'] - assert checkpoint == (expected_head - 2 * 8) - savepoint = sandbox.client(3).get_savepoint() - assert savepoint == (checkpoint - MAX_OP_TTL) - caboose = sandbox.client(3).get_caboose() - assert caboose == savepoint -- GitLab From 223782f8bbb8c160be9812344b42d4a0ea4f7724 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Fri, 10 Dec 2021 09:03:58 +0100 Subject: [PATCH 2/6] Node: remove legacy store upgrade --- src/bin_node/node_data_version.ml | 34 ++----------------------------- 1 file changed, 2 insertions(+), 32 deletions(-) diff --git a/src/bin_node/node_data_version.ml b/src/bin_node/node_data_version.ml index 8759cb359adc..a78f3bffd14b 100644 --- a/src/bin_node/node_data_version.ml +++ b/src/bin_node/node_data_version.ml @@ -58,18 +58,7 @@ let data_version = "0.0.6" much, an idea would be to have triples (version, version, converter), and to sequence them dynamically instead of statically. 
*) -let upgradable_data_version = - [ - ( "0.0.4", - fun ~data_dir genesis ~chain_name ~sandbox_parameters -> - let patch_context = - Patch_context.patch_context genesis sandbox_parameters - in - Legacy.upgrade_0_0_4 ~data_dir ~patch_context ~chain_name genesis ); - ( "0.0.5", - fun ~data_dir genesis ~chain_name:_ ~sandbox_parameters:_ -> - Legacy.upgrade_0_0_5 ~data_dir genesis ); - ] +let upgradable_data_version = [] let version_encoding = Data_encoding.(obj1 (req "version" string)) @@ -221,17 +210,6 @@ module Events = struct ~pp2:Format.pp_print_string ("available_version", Data_encoding.string) - let legacy_store_is_present = - declare_1 - ~section - ~level:Notice - ~name:"legacy_store_is_present" - ~msg: - "the former store is present at '{legacy_store_path}' and may be \ removed to save disk space if the upgrade process went well" - ~pp1:Format.pp_print_string - ("legacy_store_path", Data_encoding.string) - let emit = Internal_event.Simple.emit end @@ -314,17 +292,9 @@ let ensure_data_dir bare data_dir = | Unix.Unix_error _ -> fail (Invalid_data_dir {data_dir; msg = None}) | exc -> raise exc) -let check_data_dir_legacy_artifact data_dir = - let lmdb_store_artifact_path = Legacy.temporary_former_store_path ~data_dir in - Lwt_unix.file_exists lmdb_store_artifact_path >>= function - | true -> Events.(emit legacy_store_is_present) lmdb_store_artifact_path - | false -> Lwt.return_unit - let upgrade_data_dir ~data_dir genesis ~chain_name ~sandbox_parameters = ensure_data_dir false data_dir >>=? function - | None -> - Events.(emit dir_is_up_to_date ()) >>= fun () -> - check_data_dir_legacy_artifact data_dir >>= fun () -> return_unit + | None -> Events.(emit dir_is_up_to_date ()) >>= return | Some (version, upgrade) -> ( Events.(emit upgrading_node (version, data_version)) >>= fun () -> upgrade ~data_dir genesis ~chain_name ~sandbox_parameters >>= function -- GitLab From 472203c31a9494bacafa03c083917d21e595d094 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Fri, 10 Dec 2021 09:11:09 +0100 Subject: [PATCH 3/6] Store/Context: remove legacy snapshot support --- src/lib_context/context.ml | 548 -------------------- src/lib_context/context.mli | 126 ----- src/lib_context/context_dump.ml | 666 +------------------------ src/lib_context/context_dump.mli | 2 - src/lib_context/context_dump_intf.ml | 86 ---- src/lib_shell_services/store_errors.ml | 26 - src/lib_store/snapshots.ml | 452 +---------------- src/lib_store/snapshots.mli | 27 +- 8 files changed, 13 insertions(+), 1920 deletions(-) diff --git a/src/lib_context/context.ml b/src/lib_context/context.ml index 107d809a63a8..2fcf3051b54a 100644 --- a/src/lib_context/context.ml +++ b/src/lib_context/context.ml @@ -752,363 +752,6 @@ module Dumpable_context = struct module Block_header = Block_header end -(* Context dumping: legacy *) - -module Protocol_data_legacy = struct - type t = Int32.t * data - - and info = {author : string; message : string; timestamp : Time.Protocol.t} - - and data = { - info : info; - protocol_hash : Protocol_hash.t; - test_chain_status : Test_chain_status.t; - data_key : Context_hash.t; - predecessor_block_metadata_hash : Block_metadata_hash.t option; - predecessor_ops_metadata_hash : Operation_metadata_list_list_hash.t option; - parents : Context_hash.t list; - } - - let info_encoding = - let open Data_encoding in - conv - (fun {author; message; timestamp} -> (author, message, timestamp)) - (fun (author, message, timestamp) -> {author; message; timestamp}) - (obj3 - (req "author" string) - (req "message" string) -
(req "timestamp" Time.Protocol.encoding)) - - let data_encoding = - let open Data_encoding in - conv - (fun { - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - info; - protocol_hash; - test_chain_status; - data_key; - parents; - } -> - ( predecessor_block_metadata_hash, - predecessor_ops_metadata_hash, - info, - protocol_hash, - test_chain_status, - data_key, - parents )) - (fun ( predecessor_block_metadata_hash, - predecessor_ops_metadata_hash, - info, - protocol_hash, - test_chain_status, - data_key, - parents ) -> - { - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - info; - protocol_hash; - test_chain_status; - data_key; - parents; - }) - (obj7 - (opt "predecessor_block_metadata_hash" Block_metadata_hash.encoding) - (opt - "predecessor_ops_metadata_hash" - Operation_metadata_list_list_hash.encoding) - (req "info" info_encoding) - (req "protocol_hash" Protocol_hash.encoding) - (req "test_chain_status" Test_chain_status.encoding) - (req "data_key" Context_hash.encoding) - (req "parents" (list Context_hash.encoding))) - - (* This version didn't include the optional fields - [predecessor_block_metadata_hash] and [predecessor_ops_metadata_hashes], - but we can still restore this version by setting these to [None]. *) - let data_encoding_1_0_0 = - let open Data_encoding in - conv - (fun { - predecessor_block_metadata_hash = _; - predecessor_ops_metadata_hash = _; - info; - protocol_hash; - test_chain_status; - data_key; - parents; - } -> (info, protocol_hash, test_chain_status, data_key, parents)) - (fun (info, protocol_hash, test_chain_status, data_key, parents) -> - { - predecessor_block_metadata_hash = None; - predecessor_ops_metadata_hash = None; - info; - protocol_hash; - test_chain_status; - data_key; - parents; - }) - (obj5 - (req "info" info_encoding) - (req "protocol_hash" Protocol_hash.encoding) - (req "test_chain_status" Test_chain_status.encoding) - (req "data_key" Context_hash.encoding) - (req "parents" (list Context_hash.encoding))) - - let encoding = - let open Data_encoding in - tup2 int32 data_encoding - - let encoding_1_0_0 = - let open Data_encoding in - tup2 int32 data_encoding_1_0_0 - - let to_bytes = Data_encoding.Binary.to_bytes_exn encoding - - let of_bytes = Data_encoding.Binary.of_bytes_opt encoding -end - -module Block_data_legacy = struct - type t = {block_header : Block_header.t; operations : Operation.t list list} - - let encoding = - let open Data_encoding in - conv - (fun {block_header; operations} -> (operations, block_header)) - (fun (operations, block_header) -> {block_header; operations}) - (obj2 - (req "operations" (list (list (dynamic_size Operation.encoding)))) - (req "block_header" Block_header.encoding)) - - let to_bytes = Data_encoding.Binary.to_bytes_exn encoding - - let of_bytes = Data_encoding.Binary.of_bytes_opt encoding - - let header {block_header; _} = block_header -end - -module Pruned_block_legacy = struct - type t = { - block_header : Block_header.t; - operations : (int * Operation.t list) list; - operation_hashes : (int * Operation_hash.t list) list; - } - - let encoding = - let open Data_encoding in - conv - (fun {block_header; operations; operation_hashes} -> - (operations, operation_hashes, block_header)) - (fun (operations, operation_hashes, block_header) -> - {block_header; operations; operation_hashes}) - (obj3 - (req - "operations" - (list (tup2 int31 (list (dynamic_size Operation.encoding))))) - (req - "operation_hashes" - (list (tup2 int31 (list (dynamic_size Operation_hash.encoding))))) - 
(req "block_header" Block_header.encoding)) - - let to_bytes pruned_block = - Data_encoding.Binary.to_bytes_exn encoding pruned_block - - let of_bytes pruned_block = - Data_encoding.Binary.of_bytes_opt encoding pruned_block - - let header {block_header; _} = block_header -end - -module Dumpable_context_legacy = struct - type nonrec index = index - - type nonrec context = context - - type tree = Store.tree - - type hash = Store.hash - - module Kinded_hash = struct - type t = [`Blob of hash | `Node of hash] - - let encoding : t Data_encoding.t = - let open Data_encoding in - let kind_encoding = string_enum [("node", `Node); ("blob", `Blob)] in - conv - (function - | `Blob h -> (`Blob, Context_hash.to_bytes (Hash.to_context_hash h)) - | `Node h -> (`Node, Context_hash.to_bytes (Hash.to_context_hash h))) - (function - | (`Blob, h) -> - `Blob (Hash.of_context_hash (Context_hash.of_bytes_exn h)) - | (`Node, h) -> - `Node (Hash.of_context_hash (Context_hash.of_bytes_exn h))) - (obj2 (req "kind" kind_encoding) (req "value" bytes)) - end - - type commit_info = Info.t - - type batch = - | Batch of - Store.repo * [`Read | `Write] P.Contents.t * [`Read | `Write] P.Node.t - - let batch index f = - P.Repo.batch index.repo (fun x y _ -> f (Batch (index.repo, x, y))) - - let commit_info_encoding = - let open Data_encoding in - conv - (fun irmin_info -> - let author = Info.author irmin_info in - let message = Info.message irmin_info in - let date = Info.date irmin_info in - (author, message, date)) - (fun (author, message, date) -> Info.v ~author ~date message) - (obj3 (req "author" string) (req "message" string) (req "date" int64)) - - let hash_equal (h1 : hash) (h2 : hash) = h1 = h2 - - let context_parents ctxt = - match ctxt with - | {parents = [commit]; _} -> - let parents = Store.Commit.parents commit in - let parents = List.map Hash.to_context_hash parents in - List.sort Context_hash.compare parents - | _ -> assert false - - let context_info = function - | {parents = [c]; _} -> Store.Commit.info c - | _ -> assert false - - let get_context idx bh = checkout idx bh.Block_header.shell.context - - let set_context ~info ~parents ctxt bh = - let parents = List.sort Context_hash.compare parents in - let parents = List.map Hash.of_context_hash parents in - Store.Commit.v ctxt.index.repo ~info ~parents ctxt.tree >>= fun c -> - let h = Store.Commit.hash c in - Lwt.return - (Context_hash.equal - bh.Block_header.shell.context - (Hash.to_context_hash h)) - - let context_tree ctxt = ctxt.tree - - type binding = { - key : string; - value : tree; - value_kind : [`Node | `Contents]; - value_hash : hash; - } - - (** Unpack the bindings in a tree node (in lexicographic order) and clear its - internal cache. 
*) - let bindings tree : binding list Lwt.t = - Store.Tree.list tree [] >>= fun keys -> - keys - |> List.sort (fun (a, _) (b, _) -> String.compare a b) - |> List.map_s (fun (key, value) -> - Store.Tree.kind value [] >|= function - | None -> - (* The value must exist in the tree, because we're - iterating over existing keys *) - assert false - | Some value_kind -> - let value_hash = Store.Tree.hash value in - {key; value; value_kind; value_hash}) - >|= fun bindings -> - Store.Tree.clear tree ; - bindings - - module Hashset = struct - module String_set = Utils.String_set - - let create () = - String_set.create ~elt_length:Hash.hash_size ~initial_capacity:100_000 - - let mem t h = String_set.mem t (Hash.to_raw_string h) - - let add t h = String_set.add t (Hash.to_raw_string h) - end - - let tree_iteri_unique f tree = - let total_visited = ref 0 in - (* Noting the visited hashes *) - let visited_hash = Hashset.create () in - let visited h = Hashset.mem visited_hash h in - let set_visit h = - incr total_visited ; - Hashset.add visited_hash h - in - let rec aux : type a. tree -> (unit -> a) -> a Lwt.t = - fun tree k -> - bindings tree - >>= List.map_s (fun {key; value; value_hash; value_kind} -> - let kinded_value_hash = - match value_kind with - | `Node -> `Node value_hash - | `Contents -> `Blob value_hash - in - let kv = (key, kinded_value_hash) in - if visited value_hash then Lwt.return kv - else - match value_kind with - | `Node -> - (* Visit children first, in left-to-right order. *) - (aux [@ocaml.tailcall]) value (fun () -> - (* There cannot be a cycle. *) - set_visit value_hash ; - kv) - | `Contents -> - Store.Tree.get value [] >>= fun data -> - f !total_visited (`Leaf data) >|= fun () -> - set_visit value_hash ; - kv) - >>= fun sub_keys -> f !total_visited (`Branch sub_keys) >|= k - in - aux tree Fun.id - - let make_context index = - {index; tree = Store.Tree.empty; parents = []; ops = 0} - - let update_context context tree = {context with tree} - - let add_hash (Batch (repo, _, _)) tree key hash = - let irmin_hash = - match hash with `Blob hash -> `Contents (hash, ()) | `Node _ as n -> n - in - Store.Tree.of_hash repo irmin_hash >>= function - | None -> Lwt.return_none - | Some t -> Store.Tree.add_tree tree key (t :> tree) >>= Lwt.return_some - - let add_bytes (Batch (_, t, _)) b = - (* Save the contents in the store *) - Store.save_contents t b >|= fun _ -> Store.Tree.of_contents b - - let add_dir batch l = - let rec fold_list sub_tree = function - | [] -> Lwt.return_some sub_tree - | (step, hash) :: tl -> ( - add_hash batch sub_tree [step] hash >>= function - | None -> Lwt.return_none - | Some sub_tree -> fold_list sub_tree tl) - in - fold_list Store.Tree.empty l >>= function - | None -> Lwt.return_none - | Some tree -> - let (Batch (repo, x, y)) = batch in - (* Save the node in the store ... 
*) - Store.save_tree ~clear:true repo x y tree >|= fun _ -> Some tree - - module Commit_hash = Context_hash - module Block_header = Block_header - module Block_data = Block_data_legacy - module Pruned_block = Pruned_block_legacy - module Protocol_data = Protocol_data_legacy -end - (* Protocol data *) let data_node_hash context = @@ -1213,7 +856,6 @@ let check_protocol_commit_consistency index ~expected_context_hash (* Context dumper *) module Context_dumper = Context_dump.Make (Dumpable_context) -module Context_dumper_legacy = Context_dump.Make_legacy (Dumpable_context_legacy) (* provides functions dump_context and restore_context *) let dump_context idx data ~fd = @@ -1226,193 +868,3 @@ let restore_context idx ~expected_context_hash ~nb_context_elements ~fd = ~expected_context_hash ~fd ~nb_context_elements - -let legacy_restore_contexts idx ~filename k_store_pruned_block - pipeline_validation = - let file_init () = - Lwt_unix.openfile filename Lwt_unix.[O_RDONLY; O_CLOEXEC] 0o600 >>= return - in - Lwt.catch file_init (function - | Unix.Unix_error (e, _, _) -> - fail @@ Cannot_open_file (Unix.error_message e) - | exc -> - let msg = - Printf.sprintf "unknown error: %s" (Printexc.to_string exc) - in - fail (Cannot_open_file msg)) - >>=? fun fd -> - Lwt.finalize - (fun () -> - Context_dumper_legacy.legacy_restore_contexts_fd - idx - ~fd - k_store_pruned_block - pipeline_validation - >>=? fun result -> - Lwt_unix.lseek fd 0 Lwt_unix.SEEK_CUR >>= fun current -> - Lwt_unix.fstat fd >>= fun stats -> - let total = stats.Lwt_unix.st_size in - if current = total then return result - else fail @@ Suspicious_file (total - current)) - (fun () -> Lwt_unix.close fd) - -let legacy_get_protocol_data_from_header index block_header = - let open Protocol_data_legacy in - checkout_exn index block_header.Block_header.shell.context >>= fun context -> - let level = block_header.shell.level in - let irmin_info = Dumpable_context.context_info context in - let date = Info.date irmin_info in - let author = Info.author irmin_info in - let message = Info.message irmin_info in - let info = {timestamp = Time.Protocol.of_seconds date; author; message} in - let parents = Dumpable_context.context_parents context in - get_protocol context >>= fun protocol_hash -> - get_test_chain context >>= fun test_chain_status -> - find_predecessor_block_metadata_hash context - >>= fun predecessor_block_metadata_hash -> - find_predecessor_ops_metadata_hash context - >>= fun predecessor_ops_metadata_hash -> - data_node_hash context >>= fun data_key -> - Lwt.return - ( level, - { - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - parents; - protocol_hash; - test_chain_status; - data_key; - info; - } ) - -let legacy_restore_context ?expected_block idx ~snapshot_file ~handle_block - ~handle_protocol_data ~block_validation = - Lwt.catch - (fun () -> Lwt_unix.openfile snapshot_file [Unix.O_RDONLY] 0o600 >>= return) - (function - | Unix.Unix_error (e, _, _) -> - fail (Cannot_open_file (Unix.error_message e)) - | exc -> - let msg = - Printf.sprintf "unknown error: %s" (Printexc.to_string exc) - in - fail (Cannot_open_file msg)) - >>=? fun fd -> - Lwt.finalize - (fun () -> - Context_dumper_legacy.restore_context_fd - idx - ~fd - ?expected_block - ~handle_block - ~handle_protocol_data - ~block_validation - >>=? 
fun result -> - Lwt_unix.lseek fd 0 Lwt_unix.SEEK_CUR >>= fun current -> - Lwt_unix.fstat fd >>= fun stats -> - let total = stats.Lwt_unix.st_size in - if current = total then return result - else fail (Suspicious_file (total - current))) - (fun () -> Lwt_unix.close fd) - -let legacy_read_metadata ~snapshot_file = - Lwt.catch - (fun () -> Lwt_unix.openfile snapshot_file [Unix.O_RDONLY] 0o600 >>= return) - (function - | Unix.Unix_error (e, _, _) -> - fail (Cannot_open_file (Unix.error_message e)) - | exc -> - let msg = - Printf.sprintf "unknown error: %s" (Printexc.to_string exc) - in - fail (Cannot_open_file msg)) - >>=? fun fd -> - Lwt.finalize - (fun () -> Context_dumper_legacy.get_snapshot_metadata ~snapshot_fd:fd) - (fun () -> Lwt_unix.close fd) - -(* For testing purposes only *) -let legacy_dump_snapshot idx datas ~filename = - Lwt.catch - (fun () -> - Lwt_unix.openfile filename Lwt_unix.[O_WRONLY; O_CREAT; O_TRUNC] 0o666 - >>= fun fd -> - Lwt.finalize - (fun () -> Context_dumper_legacy.dump_contexts_fd idx datas ~fd) - (fun () -> Lwt_unix.close fd)) - (function - | Unix.Unix_error (e, _, _) -> - fail @@ Cannot_create_file (Unix.error_message e) - | exc -> - let msg = - Printf.sprintf "unknown error: %s" (Printexc.to_string exc) - in - fail (Cannot_create_file msg)) - -(* For testing purposes only *) -let validate_context_hash_consistency_and_commit ~data_hash - ~expected_context_hash ~timestamp ~test_chain ~protocol_hash ~message - ~author ~parents ~predecessor_block_metadata_hash - ~predecessor_ops_metadata_hash ~index = - let data_hash = Hash.of_context_hash data_hash in - let parents = List.map Hash.of_context_hash parents in - let protocol_value = Protocol_hash.to_bytes protocol_hash in - let test_chain_value = - Data_encoding.Binary.to_bytes_exn Test_chain_status.encoding test_chain - in - let tree = Store.Tree.empty in - Store.Tree.add tree current_protocol_key protocol_value >>= fun tree -> - Store.Tree.add tree current_test_chain_key test_chain_value >>= fun tree -> - (match predecessor_block_metadata_hash with - | Some predecessor_block_metadata_hash -> - let predecessor_block_metadata_hash_value = - Block_metadata_hash.to_bytes predecessor_block_metadata_hash - in - Store.Tree.add - tree - current_predecessor_block_metadata_hash_key - predecessor_block_metadata_hash_value - | None -> Lwt.return tree) - >>= fun tree -> - (match predecessor_ops_metadata_hash with - | Some predecessor_ops_metadata_hash -> - let predecessor_ops_metadata_hash_value = - Operation_metadata_list_list_hash.to_bytes predecessor_ops_metadata_hash - in - Store.Tree.add - tree - current_predecessor_ops_metadata_hash_key - predecessor_ops_metadata_hash_value - | None -> Lwt.return tree) - >>= fun tree -> - let info = - Info.v ~author ~date:(Time.Protocol.to_seconds timestamp) message - in - let data_tree = Store.Tree.shallow index.repo (`Node data_hash) in - Store.Tree.add_tree tree current_data_key data_tree >>= fun node -> - let node = Store.Tree.hash node in - let commit = P.Commit.Val.v ~parents ~node ~info in - let computed_context_hash = Hash.to_context_hash (P.Commit.Key.hash commit) in - if Context_hash.equal expected_context_hash computed_context_hash then - let ctxt = - let parent = Store.of_private_commit index.repo commit in - {index; tree = Store.Tree.empty; parents = [parent]; ops = 0} - in - add_test_chain ctxt test_chain >>= fun ctxt -> - add_protocol ctxt protocol_hash >>= fun ctxt -> - (match predecessor_block_metadata_hash with - | Some predecessor_block_metadata_hash -> - 
add_predecessor_block_metadata_hash ctxt predecessor_block_metadata_hash - | None -> Lwt.return ctxt) - >>= fun ctxt -> - (match predecessor_ops_metadata_hash with - | Some predecessor_ops_metadata_hash -> - add_predecessor_ops_metadata_hash ctxt predecessor_ops_metadata_hash - | None -> Lwt.return ctxt) - >>= fun ctxt -> - let data_t = Store.Tree.shallow index.repo (`Node data_hash) in - Store.Tree.add_tree ctxt.tree current_data_key data_t >>= fun new_tree -> - Store.Commit.v ctxt.index.repo ~info ~parents new_tree >|= fun commit -> - let ctxt_h = Hash.to_context_hash (Store.Commit.hash commit) in - Context_hash.equal ctxt_h expected_context_hash - else Lwt.return_false diff --git a/src/lib_context/context.mli b/src/lib_context/context.mli index 2b4f60b6a794..4e8bc89a8c20 100644 --- a/src/lib_context/context.mli +++ b/src/lib_context/context.mli @@ -150,52 +150,6 @@ val add_predecessor_ops_metadata_hash : (** {2 Context dumping} *) -module Protocol_data_legacy : sig - type t = Int32.t * data - - and info = {author : string; message : string; timestamp : Time.Protocol.t} - - and data = { - info : info; - protocol_hash : Protocol_hash.t; - test_chain_status : Test_chain_status.t; - data_key : Context_hash.t; - predecessor_block_metadata_hash : Block_metadata_hash.t option; - predecessor_ops_metadata_hash : Operation_metadata_list_list_hash.t option; - parents : Context_hash.t list; - } - - val to_bytes : t -> Bytes.t - - val of_bytes : Bytes.t -> t option - - val encoding : t Data_encoding.t -end - -module Block_data_legacy : sig - type t = {block_header : Block_header.t; operations : Operation.t list list} - - val to_bytes : t -> Bytes.t - - val of_bytes : Bytes.t -> t option - - val encoding : t Data_encoding.t -end - -module Pruned_block_legacy : sig - type t = { - block_header : Block_header.t; - operations : (int * Operation.t list) list; - operation_hashes : (int * Operation_hash.t list) list; - } - - val encoding : t Data_encoding.t - - val to_bytes : t -> Bytes.t - - val of_bytes : Bytes.t -> t option -end - val dump_context : index -> Context_hash.t -> fd:Lwt_unix.file_descr -> int tzresult Lwt.t @@ -206,52 +160,6 @@ val restore_context : fd:Lwt_unix.file_descr -> unit tzresult Lwt.t -val legacy_restore_context : - ?expected_block:string -> - index -> - snapshot_file:string -> - handle_block: - (History_mode.Legacy.t -> - Block_hash.t * Pruned_block_legacy.t -> - unit tzresult Lwt.t) -> - handle_protocol_data:(Protocol_data_legacy.t -> unit tzresult Lwt.t) -> - block_validation: - (Block_header.t option -> - Block_hash.t -> - Pruned_block_legacy.t -> - unit tzresult Lwt.t) -> - (Block_header.t - * Block_data_legacy.t - * Block_metadata_hash.t option - * Tezos_crypto.Operation_metadata_hash.t list list option - * Block_header.t option - * History_mode.Legacy.t) - tzresult - Lwt.t - -val legacy_read_metadata : - snapshot_file:string -> (string * History_mode.Legacy.t) tzresult Lwt.t - -(* Interface exposed for the lib_store/legacy_store *) -val legacy_restore_contexts : - index -> - filename:string -> - ((Block_hash.t * Pruned_block_legacy.t) list -> unit tzresult Lwt.t) -> - (Block_header.t option -> - Block_hash.t -> - Pruned_block_legacy.t -> - unit tzresult Lwt.t) -> - (Block_header.t - * Block_data_legacy.t - * Block_metadata_hash.t option - * Operation_metadata_hash.t list list option - * History_mode.Legacy.t - * Block_header.t option - * Block_hash.t list - * Protocol_data_legacy.t list) - tzresult - Lwt.t - val retrieve_commit_info : index -> Block_header.t -> @@ -281,40 
+189,6 @@ val check_protocol_commit_consistency : parents_contexts:Context_hash.t list -> bool Lwt.t -(**/**) - -(** {b Warning} For testing purposes only *) - -val legacy_get_protocol_data_from_header : - index -> Block_header.t -> Protocol_data_legacy.t Lwt.t - -val legacy_dump_snapshot : - index -> - Block_header.t - * Block_data_legacy.t - * Block_metadata_hash.t option - * Operation_metadata_hash.t list list option - * History_mode.Legacy.t - * (Block_header.t -> - (Pruned_block_legacy.t option * Protocol_data_legacy.t option) tzresult - Lwt.t) -> - filename:string -> - unit tzresult Lwt.t - -val validate_context_hash_consistency_and_commit : - data_hash:Context_hash.t -> - expected_context_hash:Context_hash.t -> - timestamp:Time.Protocol.t -> - test_chain:Test_chain_status.t -> - protocol_hash:Protocol_hash.t -> - message:string -> - author:string -> - parents:Context_hash.t list -> - predecessor_block_metadata_hash:Block_metadata_hash.t option -> - predecessor_ops_metadata_hash:Operation_metadata_list_list_hash.t option -> - index:index -> - bool Lwt.t - (** Offline integrity checking and statistics for contexts. *) module Checks : sig module Pack : Irmin_pack.Checks.S diff --git a/src/lib_context/context_dump.ml b/src/lib_context/context_dump.ml index 59364adb78ea..37e705e6ac0f 100644 --- a/src/lib_context/context_dump.ml +++ b/src/lib_context/context_dump.ml @@ -90,27 +90,7 @@ let () = Format.fprintf ppf "Internal error while restoring the context.") empty (function Restore_context_failure -> Some () | _ -> None) - (fun () -> Restore_context_failure) ; - register_error_kind - `Permanent - ~id:"context_dump.inconsistent_imported_block_legacy" - ~title:"Inconsistent imported block legacy" - ~description:"The imported block is not the expected one." - ~pp:(fun ppf (got, exp) -> - Format.fprintf - ppf - "The block contained in the file is %a instead of %a." - Block_hash.pp - got - Block_hash.pp - exp) - (obj2 - (req "block_hash" Block_hash.encoding) - (req "block_hash_expected" Block_hash.encoding)) - (function - | Inconsistent_imported_block_legacy (got, exp) -> Some (got, exp) - | _ -> None) - (fun (got, exp) -> Inconsistent_imported_block_legacy (got, exp)) + (fun () -> Restore_context_failure) module Make (I : Dump_interface) = struct type command = @@ -415,647 +395,3 @@ let () = | Invalid_snapshot_version (found, expected) -> Some (found, expected) | _ -> None) (fun (found, expected) -> Invalid_snapshot_version (found, expected)) - -module Make_legacy (I : Dump_interface_legacy) = struct - let current_version = "tezos-snapshot-1.1.0" - - (* A set of versions that may be restored *) - let compatible_versions = [current_version; "tezos-snapshot-1.0.0"] - - type command = - | Root of { - block_header : I.Block_header.t; - info : I.commit_info; - parents : I.Commit_hash.t list; - block_data : I.Block_data.t; - pred_block_metadata_hash : Block_metadata_hash.t option; - pred_ops_metadata_hashes : Operation_metadata_hash.t list list option; - } - | Node of (string * I.Kinded_hash.t) list - | Blob of bytes - | Proot of I.Pruned_block.t - | Loot of I.Protocol_data.t - | End - - (* Command encoding. 
*) - - let blob_encoding = - let open Data_encoding in - case - ~title:"blob" - (Tag (Char.code 'b')) - bytes - (function Blob b -> Some b | _ -> None) - (function b -> Blob b) - - let node_encoding = - let open Data_encoding in - case - ~title:"node" - (Tag (Char.code 'd')) - (list (obj2 (req "name" string) (req "hash" I.Kinded_hash.encoding))) - (function Node x -> Some x | _ -> None) - (function x -> Node x) - - let end_encoding = - let open Data_encoding in - case - ~title:"end" - (Tag (Char.code 'e')) - empty - (function End -> Some () | _ -> None) - (fun () -> End) - - let loot_encoding = - let open Data_encoding in - case - ~title:"loot" - (Tag (Char.code 'l')) - I.Protocol_data.encoding - (function Loot protocol_data -> Some protocol_data | _ -> None) - (fun protocol_data -> Loot protocol_data) - - let loot_encoding_1_0_0 = - let open Data_encoding in - case - ~title:"loot" - (Tag (Char.code 'l')) - I.Protocol_data.encoding_1_0_0 - (function Loot protocol_data -> Some protocol_data | _ -> None) - (fun protocol_data -> Loot protocol_data) - - let proot_encoding = - let open Data_encoding in - case - ~title:"proot" - (Tag (Char.code 'p')) - (obj1 (req "pruned_block" I.Pruned_block.encoding)) - (function Proot pruned_block -> Some pruned_block | _ -> None) - (fun pruned_block -> Proot pruned_block) - - let root_encoding = - let open Data_encoding in - case - ~title:"root" - (Tag (Char.code 'r')) - (obj6 - (opt "pred_block_metadata_hash" Block_metadata_hash.encoding) - (opt - "pred_ops_metadata_hashes" - (list (list Operation_metadata_hash.encoding))) - (req "block_header" (dynamic_size I.Block_header.encoding)) - (req "info" I.commit_info_encoding) - (req "parents" (list I.Commit_hash.encoding)) - (req "block_data" I.Block_data.encoding)) - (function - | Root - { - pred_block_metadata_hash; - pred_ops_metadata_hashes; - block_header; - info; - parents; - block_data; - } -> - Some - ( pred_block_metadata_hash, - pred_ops_metadata_hashes, - block_header, - info, - parents, - block_data ) - | _ -> None) - (fun ( pred_block_metadata_hash, - pred_ops_metadata_hashes, - block_header, - info, - parents, - block_data ) -> - Root - { - pred_block_metadata_hash; - pred_ops_metadata_hashes; - block_header; - info; - parents; - block_data; - }) - - (* This version (1.0.0) doesn't include the optional fields - [pred_block_metadata_hash] and [pred_ops_metadata_hashes], but we can still - restore this version by setting these to [None]. 
*) - let root_encoding_1_0_0 = - let open Data_encoding in - case - ~title:"root" - (Tag (Char.code 'r')) - (obj4 - (req "block_header" (dynamic_size I.Block_header.encoding)) - (req "info" I.commit_info_encoding) - (req "parents" (list I.Commit_hash.encoding)) - (req "block_data" I.Block_data.encoding)) - (function - | Root - { - block_header; - pred_block_metadata_hash = _; - pred_ops_metadata_hashes = _; - info; - parents; - block_data; - } -> - Some (block_header, info, parents, block_data) - | _ -> None) - (fun (block_header, info, parents, block_data) -> - Root - { - block_header; - pred_block_metadata_hash = None; - pred_ops_metadata_hashes = None; - info; - parents; - block_data; - }) - - let command_encoding = - Data_encoding.union - ~tag_size:`Uint8 - [ - blob_encoding; - node_encoding; - end_encoding; - loot_encoding; - proot_encoding; - root_encoding; - ] - - let command_encoding_1_0_0 = - Data_encoding.union - ~tag_size:`Uint8 - [ - blob_encoding; - node_encoding; - end_encoding; - loot_encoding_1_0_0; - proot_encoding; - root_encoding_1_0_0; - ] - - (* IO toolkit. *) - - let rec read_string rbuf ~len = - let (fd, buf, ofs, total) = !rbuf in - if Bytes.length buf - ofs < len then ( - let blen = Bytes.length buf - ofs in - let neu = Bytes.create (blen + 1_000_000) in - Bytes.blit buf ofs neu 0 blen ; - Lwt_unix.read fd neu blen 1_000_000 >>= fun bread -> - total := !total + bread ; - if bread = 0 then fail Inconsistent_snapshot_file - else - let neu = - if bread <> 1_000_000 then Bytes.sub neu 0 (blen + bread) else neu - in - rbuf := (fd, neu, 0, total) ; - read_string rbuf ~len) - else - let res = Bytes.sub_string buf ofs len in - rbuf := (fd, buf, ofs + len, total) ; - return res - - let read_mbytes rbuf b = - read_string rbuf ~len:(Bytes.length b) >>=? fun string -> - Bytes.blit_string string 0 b 0 (Bytes.length b) ; - return () - - let set_int64 buf i = - let b = Bytes.create 8 in - EndianBytes.BigEndian.set_int64 b 0 i ; - Buffer.add_bytes buf b - - let get_int64 rbuf = - read_string ~len:8 rbuf >>=? fun s -> - return @@ EndianString.BigEndian.get_int64 s 0 - - let set_mbytes buf b = - set_int64 buf (Int64.of_int (Bytes.length b)) ; - Buffer.add_bytes buf b - - let get_mbytes rbuf = - get_int64 rbuf >|=? Int64.to_int >>=? fun l -> - let b = Bytes.create l in - read_mbytes rbuf b >>=? fun () -> return b - - (* Getter and setters *) - - let get_command command_encoding rbuf = - get_mbytes rbuf >|=? 
fun bytes -> - Data_encoding.Binary.of_bytes_exn command_encoding bytes - - let set_root buf block_header info parents block_data pred_block_metadata_hash - pred_ops_metadata_hashes = - let root = - Root - { - block_header; - info; - parents; - block_data; - pred_block_metadata_hash; - pred_ops_metadata_hashes; - } - in - let bytes = Data_encoding.Binary.to_bytes_exn command_encoding root in - set_mbytes buf bytes - - let set_tree buf tree = - (match tree with `Branch node -> Node node | `Leaf blob -> Blob blob) - |> Data_encoding.Binary.to_bytes_exn command_encoding - |> set_mbytes buf - - let set_proot buf pruned_block = - let proot = Proot pruned_block in - let bytes = Data_encoding.Binary.to_bytes_exn command_encoding proot in - set_mbytes buf bytes - - let set_loot buf protocol_data = - let loot = Loot protocol_data in - let bytes = Data_encoding.Binary.to_bytes_exn command_encoding loot in - set_mbytes buf bytes - - let set_end buf = - let bytes = Data_encoding.Binary.to_bytes_exn command_encoding End in - set_mbytes buf bytes - - (* Snapshot metadata *) - - type snapshot_metadata = {version : string; mode : History_mode.Legacy.t} - - let snapshot_metadata_encoding = - let open Data_encoding in - conv - (fun {version; mode} -> (version, mode)) - (fun (version, mode) -> {version; mode}) - (obj2 (req "version" string) (req "mode" History_mode.Legacy.encoding)) - - let write_snapshot_metadata ~mode buf = - let version = {version = current_version; mode} in - let bytes = - Data_encoding.(Binary.to_bytes_exn snapshot_metadata_encoding version) - in - set_mbytes buf bytes - - let read_snapshot_metadata rbuf = - get_mbytes rbuf >|=? fun bytes -> - Data_encoding.(Binary.of_bytes_exn snapshot_metadata_encoding) bytes - - let get_snapshot_metadata ~snapshot_fd = - let read = ref 0 in - let rbuf = ref (snapshot_fd, Bytes.empty, 0, read) in - read_snapshot_metadata rbuf >>=? fun {version; mode} -> - return (version, mode) - - let check_version v = - fail_when - (List.mem ~equal:String.equal v.version compatible_versions |> not) - (Invalid_snapshot_version (v.version, compatible_versions)) - - let serialize_tree ~maybe_flush ~written buf = - I.tree_iteri_unique (fun visited sub_tree -> - set_tree buf sub_tree ; - maybe_flush () >|= fun () -> - Tezos_stdlib_unix.Utils.display_progress - ~refresh_rate:(visited, 1_000) - (fun m -> - m - "Context: %dK elements, %dMiB written%!" 
- (visited / 1_000) - (written () / 1_048_576))) - - let dump_contexts_fd idx data ~fd = - (* Dumping *) - let buf = Buffer.create 1_000_000 in - let written = ref 0 in - let flush () = - let contents = Buffer.contents buf in - Buffer.clear buf ; - written := !written + String.length contents ; - Lwt_utils_unix.write_string fd contents - in - let maybe_flush () = - if Buffer.length buf > 1_000_000 then flush () else Lwt.return_unit - in - Lwt.catch - (fun () -> - let ( bh, - block_data, - pred_block_metadata_hash, - pred_ops_metadata_hashes, - mode, - pruned_iterator ) = - data - in - write_snapshot_metadata ~mode buf ; - I.get_context idx bh >>= function - | None -> fail @@ Context_not_found (I.Block_header.to_bytes bh) - | Some ctxt -> - I.context_tree ctxt - |> serialize_tree ~maybe_flush ~written:(fun () -> !written) buf - >>= fun () -> - Tezos_stdlib_unix.Utils.display_progress_end () ; - let parents = I.context_parents ctxt in - set_root - buf - bh - (I.context_info ctxt) - parents - block_data - pred_block_metadata_hash - pred_ops_metadata_hashes ; - (* Dump pruned blocks *) - let dump_pruned cpt pruned = - Tezos_stdlib_unix.Utils.display_progress - ~refresh_rate:(cpt, 1_000) - (fun m -> - m - "History: %dK block, %dMiB written" - (cpt / 1_000) - (!written / 1_048_576)) ; - set_proot buf pruned ; - maybe_flush () - in - let rec aux cpt acc header = - pruned_iterator header >>=? function - | (None, None) -> return acc (* assert false *) - | (None, Some protocol_data) -> return (protocol_data :: acc) - | (Some pred_pruned, Some protocol_data) -> - dump_pruned cpt pred_pruned >>= fun () -> - aux - (succ cpt) - (protocol_data :: acc) - (I.Pruned_block.header pred_pruned) - | (Some pred_pruned, None) -> - dump_pruned cpt pred_pruned >>= fun () -> - aux (succ cpt) acc (I.Pruned_block.header pred_pruned) - in - let starting_block_header = I.Block_data.header block_data in - aux 0 [] starting_block_header >>=? fun protocol_datas -> - (* Dump protocol data *) - List.iter_s - (fun proto -> - set_loot buf proto ; - maybe_flush ()) - protocol_datas - >>= fun () -> - Tezos_stdlib_unix.Utils.display_progress_end () ; - return_unit >>=? fun () -> - set_end buf ; - flush () >>= fun () -> return_unit) - (function - | Unix.Unix_error (e, _, _) -> - fail @@ System_write_error (Unix.error_message e) - | err -> Lwt.fail err) - - (* Restoring legacy *) - - let restore_context_fd ~fd ?expected_block ~handle_block ~handle_protocol_data - ~block_validation index = - let read = ref 0 in - let rbuf = ref (fd, Bytes.empty, 0, read) in - (* Editing the repository *) - let add_blob t blob = I.add_bytes t blob >>= fun tree -> return tree in - let add_dir t keys = - I.add_dir t keys >>= function - | None -> fail Restore_context_failure - | Some tree -> return tree - in - let restore version = - let encoding = - if version.version = "tezos-snapshot-1.0.0" then command_encoding_1_0_0 - else command_encoding - in - let history_mode = version.mode in - let rec first_pass ctxt batch notify = - notify () >>= fun () -> - get_command encoding rbuf >>=? function - | Node contents -> - add_dir batch contents >>=? fun tree -> - first_pass (I.update_context ctxt tree) batch notify - | Blob data -> - add_blob batch data >>=? 
fun tree -> - first_pass (I.update_context ctxt tree) batch notify - | Root - { - block_header; - info; - parents; - block_data; - pred_block_metadata_hash; - pred_ops_metadata_hashes; - } -> - (* Checks that the block hash imported by the snapshot is - the expected one *) - let imported_block_header = I.Block_data.header block_data in - let imported_block_hash = Block_header.hash imported_block_header in - (match expected_block with - | Some str -> - let bh = Block_hash.of_b58check_exn str in - fail_unless - (Block_hash.equal bh imported_block_hash) - (Inconsistent_imported_block_legacy (imported_block_hash, bh)) - | None -> return_unit) - >>=? fun () -> - I.set_context ~info ~parents ctxt block_header >>= fun is_correct -> - fail_unless is_correct Inconsistent_snapshot_data >>=? fun () -> - return - ( block_header, - block_data, - pred_block_metadata_hash, - pred_ops_metadata_hashes ) - | _ -> fail Inconsistent_snapshot_data - in - let second_pass notify = - let rec loop pred_header = - get_command encoding rbuf >>=? function - | Proot pruned_block -> - let header = I.Pruned_block.header pruned_block in - let hash = Block_header.hash header in - block_validation pred_header hash pruned_block >>=? fun () -> - handle_block history_mode (hash, pruned_block) >>=? fun () -> - notify () >>= fun () -> loop (Some header) - | Loot protocol_data -> - handle_protocol_data protocol_data >>=? fun () -> loop pred_header - | End -> return pred_header - | _ -> fail Inconsistent_snapshot_data - in - loop None - in - Animation.display_progress - ~every:1000 - ~pp_print_step:(fun fmt i -> - Format.fprintf - fmt - "Writing context: %dK elements, %s read" - (i / 1_000) - (if !read > 1_048_576 then - Format.asprintf "%dMiB" (!read / 1_048_576) - else Format.asprintf "%dKiB" (!read / 1_024))) - (fun notify -> - I.batch index (fun batch -> - first_pass (I.make_context index) batch notify)) - >>=? fun ( pred_block_header, - export_block_data, - pred_block_metadata_hash, - pred_ops_metadata_hashes ) -> - Animation.display_progress - ~every:1000 - ~pp_print_step:(fun fmt i -> - Format.fprintf fmt "Storing blocks: %d blocks wrote" i) - (fun notify -> second_pass notify) - >>=? fun oldest_header_opt -> - return - ( pred_block_header, - export_block_data, - oldest_header_opt, - pred_block_metadata_hash, - pred_ops_metadata_hashes ) - in - Lwt.catch - (fun () -> - (* Check snapshot version *) - read_snapshot_metadata rbuf >>=? fun version -> - check_version version >>=? fun () -> - restore version - >>=? 
fun ( pred_block_header, - export_block_data, - oldest_header_opt, - pred_block_metadata_hash, - pred_ops_metadata_hashes ) -> - return - ( pred_block_header, - export_block_data, - pred_block_metadata_hash, - pred_ops_metadata_hashes, - oldest_header_opt, - version.mode )) - (function - | Unix.Unix_error (e, _, _) -> - fail (System_read_error (Unix.error_message e)) - | Invalid_argument _ -> fail Inconsistent_snapshot_file - | err -> Lwt.fail err) - - let legacy_restore_contexts_fd index ~fd k_store_pruned_blocks - block_validation = - let read = ref 0 in - let rbuf = ref (fd, Bytes.empty, 0, read) in - (* Editing the repository *) - let add_blob t blob = I.add_bytes t blob >>= fun tree -> return tree in - let add_dir t keys = - I.add_dir t keys >>= function - | None -> fail Restore_context_failure - | Some tree -> return tree - in - let restore version = - let encoding = - if version.version = "tezos-snapshot-1.0.0" then command_encoding_1_0_0 - else command_encoding - in - let rec first_pass batch ctxt cpt = - Tezos_stdlib_unix.Utils.display_progress - ~refresh_rate:(cpt, 1_000) - (fun m -> - m - "Context: %dK elements, %dMiB read" - (cpt / 1_000) - (!read / 1_048_576)) ; - get_command encoding rbuf >>=? function - | Root - { - block_header; - info; - parents; - block_data; - pred_block_metadata_hash; - pred_ops_metadata_hashes; - } -> ( - I.set_context ~info ~parents ctxt block_header >>= function - | false -> fail Inconsistent_snapshot_data - | true -> - return - ( block_header, - block_data, - pred_block_metadata_hash, - pred_ops_metadata_hashes )) - | Node contents -> - add_dir batch contents >>=? fun tree -> - first_pass batch (I.update_context ctxt tree) (cpt + 1) - | Blob data -> - add_blob batch data >>=? fun tree -> - first_pass batch (I.update_context ctxt tree) (cpt + 1) - | _ -> fail Inconsistent_snapshot_data - in - let rec second_pass pred_header (rev_block_hashes, protocol_datas) todo - cpt = - Tezos_stdlib_unix.Utils.display_progress - ~refresh_rate:(cpt, 1_000) - (fun m -> - m "Store: %dK elements, %dMiB read" (cpt / 1_000) (!read / 1_048_576)) ; - get_command encoding rbuf >>=? function - | Proot pruned_block -> - let header = I.Pruned_block.header pruned_block in - let hash = Block_header.hash header in - block_validation pred_header hash pruned_block >>=? fun () -> - if (cpt + 1) mod 5_000 = 0 then - k_store_pruned_blocks ((hash, pruned_block) :: todo) - >>=? fun () -> - second_pass - (Some header) - (hash :: rev_block_hashes, protocol_datas) - [] - (cpt + 1) - else - second_pass - (Some header) - (hash :: rev_block_hashes, protocol_datas) - ((hash, pruned_block) :: todo) - (cpt + 1) - | Loot protocol_data -> - k_store_pruned_blocks todo >>=? fun () -> - second_pass - pred_header - (rev_block_hashes, protocol_data :: protocol_datas) - todo - (cpt + 1) - | End -> return (pred_header, rev_block_hashes, List.rev protocol_datas) - | _ -> fail Inconsistent_snapshot_data - in - I.batch index (fun batch -> first_pass batch (I.make_context index) 0) - >>=? fun ( block_header, - block_data, - pred_block_metadata_hash, - pred_ops_metadata_hashes ) -> - Tezos_stdlib_unix.Utils.display_progress_end () ; - second_pass None ([], []) [] 0 - >>=? 
fun (oldest_header_opt, rev_block_hashes, protocol_datas) -> - Tezos_stdlib_unix.Utils.display_progress_end () ; - return - ( block_header, - block_data, - pred_block_metadata_hash, - pred_ops_metadata_hashes, - version.mode, - oldest_header_opt, - rev_block_hashes, - protocol_datas ) - in - Lwt.catch - (fun () -> - (* Check snapshot version *) - read_snapshot_metadata rbuf >>=? fun version -> - check_version version >>=? fun () -> restore version) - (function - | Unix.Unix_error (e, _, _) -> - fail (System_read_error (Unix.error_message e)) - | Invalid_argument _ -> fail Inconsistent_snapshot_file - | err -> Lwt.fail err) -end diff --git a/src/lib_context/context_dump.mli b/src/lib_context/context_dump.mli index 727043daa5f2..80930860c12d 100644 --- a/src/lib_context/context_dump.mli +++ b/src/lib_context/context_dump.mli @@ -27,5 +27,3 @@ (** @inline *) include Context_dump_intf.Context_dump - -include Context_dump_intf.Context_dump_legacy diff --git a/src/lib_context/context_dump_intf.ml b/src/lib_context/context_dump_intf.ml index 9a8ff21387b7..1c2591c8796c 100644 --- a/src/lib_context/context_dump_intf.ml +++ b/src/lib_context/context_dump_intf.ml @@ -36,7 +36,6 @@ type error += | Missing_snapshot_data | Invalid_snapshot_version of string * string list | Restore_context_failure - | Inconsistent_imported_block_legacy of Block_hash.t * Block_hash.t (* A hash that has been tagged with the kind of the value that it references (either a node or a blob). *) @@ -292,88 +291,3 @@ module type Dump_interface_legacy = sig val add_dir : batch -> (step * Kinded_hash.t) list -> tree option Lwt.t end - -module type S_legacy = sig - type index - - type context - - type block_header - - type block_data - - type pruned_block - - type protocol_data - - (** {b Warning} Used only to create legacy snapshots (testing purposes) *) - val dump_contexts_fd : - index -> - block_header - * block_data - * Block_metadata_hash.t option - * Operation_metadata_hash.t list list option - * History_mode.Legacy.t - * (block_header -> - (pruned_block option * protocol_data option) tzresult Lwt.t) -> - fd:Lwt_unix.file_descr -> - unit tzresult Lwt.t - - val restore_context_fd : - fd:Lwt_unix.file_descr -> - ?expected_block:string -> - handle_block: - (History_mode.Legacy.t -> - Block_hash.t * pruned_block -> - unit tzresult Lwt.t) -> - handle_protocol_data:(protocol_data -> unit tzresult Lwt.t) -> - block_validation: - (block_header option -> - Block_hash.t -> - pruned_block -> - unit tzresult Lwt.t) -> - index -> - (block_header - * block_data - * Block_metadata_hash.t option - * Operation_metadata_hash.t list list option - * Block_header.t option - * History_mode.Legacy.t) - tzresult - Lwt.t - - val legacy_restore_contexts_fd : - index -> - fd:Lwt_unix.file_descr -> - ((Block_hash.t * pruned_block) list -> unit tzresult Lwt.t) -> - (block_header option -> Block_hash.t -> pruned_block -> unit tzresult Lwt.t) -> - (block_header - * block_data - * Block_metadata_hash.t option - * Operation_metadata_hash.t list list option - * History_mode.Legacy.t - * Block_header.t option - * Block_hash.t list - * protocol_data list) - tzresult - Lwt.t - - val get_snapshot_metadata : - snapshot_fd:Lwt_unix.file_descr -> - (string * History_mode.Legacy.t) tzresult Lwt.t -end - -module type Context_dump_legacy = sig - module type Dump_interface_legacy = Dump_interface_legacy - - module type S_legacy = S_legacy - - module Make_legacy (I : Dump_interface_legacy) : - S_legacy - with type index := I.index - and type context := I.context - and 
type block_header := I.Block_header.t
-    and type block_data := I.Block_data.t
-    and type pruned_block := I.Pruned_block.t
-    and type protocol_data := I.Protocol_data.t
-end
diff --git a/src/lib_shell_services/store_errors.ml b/src/lib_shell_services/store_errors.ml
index de568bfa13d0..6d5cf0a8245c 100644
--- a/src/lib_shell_services/store_errors.ml
+++ b/src/lib_shell_services/store_errors.ml
@@ -342,8 +342,6 @@ type error +=
   | Cannot_load_testchain of string
   | Missing_activation_block of Block_hash.t * Protocol_hash.t * History_mode.t
   | Inconsistent_protocol_commit_info of Block_hash.t * Protocol_hash.t
-  | Missing_activation_block_legacy of
-      Int32.t * Protocol_hash.t * History_mode.t
   | Missing_stored_data of string
   | Failed_to_get_live_blocks of Block_hash.t
   | Target_mismatch
@@ -909,30 +907,6 @@ let () =
     (function
       | Inconsistent_protocol_commit_info (bh, ph) -> Some (bh, ph) | _ -> None)
     (fun (bh, ph) -> Inconsistent_protocol_commit_info (bh, ph)) ;
-  Error_monad.register_error_kind
-    `Temporary
-    ~id:"store.missing_activation_block_legacy"
-    ~title:"Missing activation block legacy"
-    ~description:"Missing activation block while restoring a legacy snapshot"
-    ~pp:(fun ppf (bl, ph, hm) ->
-      Format.fprintf
-        ppf
-        "Failed to restore legacy snapshot: the expected activation block \
-         (level %ld) originating the protocol %a was not found for %a."
-        bl
-        Protocol_hash.pp
-        ph
-        History_mode.pp
-        hm)
-    Data_encoding.(
-      obj3
-        (req "block_hash" int32)
-        (req "protocol_hash" Protocol_hash.encoding)
-        (req "history_mode" History_mode.encoding))
-    (function
-      | Missing_activation_block_legacy (bl, ph, hm) -> Some (bl, ph, hm)
-      | _ -> None)
-    (fun (bl, ph, hm) -> Missing_activation_block_legacy (bl, ph, hm)) ;
   Error_monad.register_error_kind
     `Temporary
     ~id:"store.missing_stored_data"
diff --git a/src/lib_store/snapshots.ml b/src/lib_store/snapshots.ml
index 88760b07b1ba..9075de81f28f 100644
--- a/src/lib_store/snapshots.ml
+++ b/src/lib_store/snapshots.ml
@@ -25,7 +25,6 @@
 
 open Snapshots_events
 open Store_types
-open Store_errors
 
 (* This module handles snapshot's versioning system. *)
 module Version = struct
@@ -35,11 +34,10 @@
     let open Data_encoding in
     obj1 (req "version" int31)
 
-  (* Current version of the snapshots *)
+  (* Current version of the snapshots, introduced with storage 0.0.5.
+   * Previous versions:
+   * - 1: snapshots exported with storage 0.0.1 to 0.0.4 *)
   let current_version = 2
-
-  (* Legacy snapshots are fixed to a single version *)
-  let legacy_version = 1
 end
 
 let current_version = Version.current_version
 
@@ -95,7 +93,6 @@ type error +=
   | Target_block_validation_failed of Block_hash.t * string
   | Directory_already_exists of string
   | Empty_floating_store
-  | Inconsistent_predecessors
   | Cannot_create_tmp_export_directory of string
   | Inconsistent_chain_import of {
       expected : Distributed_db_version.Name.t;
@@ -452,18 +449,6 @@ let () =
     unit
     (function Empty_floating_store -> Some () | _ -> None)
     (fun () -> Empty_floating_store) ;
-  register_error_kind
-    `Permanent
-    ~id:"snapshot.inconsistent_predecessors"
-    ~title:"Inconsistent predecessors"
-    ~description:"Inconsistent predecessors while validating a legacy snapshot."
- ~pp:(fun ppf () -> - Format.fprintf - ppf - "Failed to validate the predecessors: inconsistent hash.") - unit - (function Inconsistent_predecessors -> Some () | _ -> None) - (fun () -> Inconsistent_predecessors) ; register_error_kind `Permanent ~id:"snapshots.cannot_create_tmp_export_directory" @@ -528,7 +513,8 @@ let () = ~pp:(fun ppf filename -> Format.fprintf ppf - "Failed to read snapshot file %s. The provided file is inconsistent." + "Failed to read snapshot file %s. The provided file is inconsistent or \ + is from Octez 9.7 (or before) and it cannot be imported anymore." filename) Data_encoding.(obj1 (req "filename" string)) (function Inconsistent_snapshot_file s -> Some s | _ -> None) @@ -597,12 +583,7 @@ let metadata_encoding = versions. *) type header = Version.t * metadata -type snapshot_header = - | Current_header of header - | Legacy_metadata of { - version : string; - legacy_history_mode : History_mode.Legacy.t; - } +type snapshot_header = Current_header of header let pp_snapshot_header ppf = function | Current_header @@ -621,17 +602,8 @@ let pp_snapshot_header ppf = function History_mode.pp_short history_mode version - | Legacy_metadata {version; legacy_history_mode} -> - Format.fprintf - ppf - "version %s in %a" - version - History_mode.Legacy.pp - legacy_history_mode -let version = function - | Current_header (version, _) -> version - | Legacy_metadata _ -> Version.legacy_version +let version = function Current_header (version, _) -> version type snapshot_format = Tar | Raw @@ -642,7 +614,7 @@ let pp_snapshot_format ppf = function | Tar -> Format.fprintf ppf "tar (single file)" | Raw -> Format.fprintf ppf "directory" -type snapshot_kind = Current of snapshot_format | Legacy | Invalid +type snapshot_kind = Current of snapshot_format | Invalid (* To speed up the import of the cemented blocks we increase, temporarily the index cache size. *) @@ -3386,14 +3358,6 @@ let snapshot_file_kind ~snapshot_path = >>= fun _ -> Lwt.return_true) (fun _ -> Lwt.return_false) in - let is_valid_legacy_snapshot snapshot_file = - protect - ~on_error:(fun _ -> return_false) - (fun () -> - Context.legacy_read_metadata ~snapshot_file >>= function - | Ok _metadata -> return_true - | Error _ -> return_false) - in if Sys.is_directory snapshot_path then let snapshot_dir = Naming.snapshot_dir ~snapshot_path () in is_valid_raw_snapshot snapshot_dir >>= fun is_raw_snasphot -> @@ -3406,11 +3370,7 @@ let snapshot_file_kind ~snapshot_path = in is_valid_uncompressed_snapshot snapshot_file >>= fun is_uncompressed_snapshot -> - if is_uncompressed_snapshot then return (Current Tar) - else - is_valid_legacy_snapshot snapshot_path - >>=? fun is_valid_legacy_snapshot -> - if is_valid_legacy_snapshot then return Legacy else return Invalid + if is_uncompressed_snapshot then return (Current Tar) else return Invalid let export ?snapshot_path export_format ?rolling ~block ~store_dir ~context_dir ~chain_name genesis = @@ -3439,386 +3399,7 @@ let read_snapshot_header ~snapshot_path = in Loader.load_snapshot_header ~snapshot_path >>=? fun (version, metadata) -> return (Current_header (version, metadata)) - | Legacy -> - Context.legacy_read_metadata ~snapshot_file:snapshot_path - >>=? 
fun (version, legacy_history_mode) -> - return (Legacy_metadata {version; legacy_history_mode}) - | _ -> fail (Inconsistent_snapshot_file snapshot_path) - -(* Legacy import *) - -let legacy_verify_predecessors header_opt pred_hash = - match header_opt with - | None -> return_unit - | Some header -> - fail_unless - (header.Block_header.shell.level >= 2l - && Block_hash.equal header.shell.predecessor pred_hash) - Inconsistent_predecessors - -let legacy_check_operations_consistency block_header operations operation_hashes - = - (* Compute operations hashes and compare *) - (List.iter2_e - ~when_different_lengths: - Legacy_snapshots.Inconsistent_operation_hashes_lengths - (fun (_, op) (_, oph) -> - let expected_op_hash = List.map Operation.hash op in - List.iter2 - ~when_different_lengths: - Legacy_snapshots.Inconsistent_operation_hashes_lengths - (fun expected found -> assert (Operation_hash.equal expected found)) - expected_op_hash - oph) - operations - operation_hashes - |> function - | Ok _ as ok -> ok - | Error err -> error err) - (* To make a trace *) - >>? fun () -> - (* Check header hashes based on Merkle tree *) - let hashes = - List.rev_map (fun (_, opl) -> List.map Operation.hash opl) operations - in - let computed_hash = - Operation_list_list_hash.compute - (List.map Operation_list_hash.compute hashes) - in - let are_oph_equal = - Operation_list_list_hash.equal - computed_hash - block_header.Block_header.shell.operations_hash - in - error_unless - are_oph_equal - (Legacy_snapshots.Inconsistent_operation_hashes - (computed_hash, block_header.Block_header.shell.operations_hash)) - -let legacy_block_validation succ_header_opt header_hash - {Context.Pruned_block_legacy.block_header; operations; operation_hashes} = - legacy_verify_predecessors succ_header_opt header_hash >>=? fun () -> - Lwt.return - (legacy_check_operations_consistency - block_header - operations - operation_hashes) - -let import_log_notice_legacy ?snapshot_header filename block = - let header = - Option.map - (fun header -> Format.asprintf "%a" pp_snapshot_header header) - snapshot_header - in - Event.(emit import_info (filename, header)) >>= fun () -> - (match block with - | None -> Event.(emit import_unspecified_hash ()) - | Some _ -> Lwt.return_unit) - >>= fun () -> Event.(emit import_loading ()) - -let check_context_hash_consistency_legacy validation_store block_header = - fail_unless - (Context_hash.equal - validation_store.Tezos_validation.Block_validation.context_hash - block_header.Block_header.shell.context) - (Inconsistent_context_hash - { - expected = block_header.Block_header.shell.context; - got = validation_store.Tezos_validation.Block_validation.context_hash; - }) - -let import_legacy ?patch_context ?block:expected_block ~snapshot_file - ~dst_store_dir ~dst_context_dir ~chain_name ~user_activated_upgrades - ~user_activated_protocol_overrides genesis = - (* First: check that the imported snapshot is compatible with the - hardcoded networks *) - Legacy.Hardcoded.check_network ~chain_name >>=? 
fun () -> - import_log_notice_legacy snapshot_file expected_block >>= fun () -> - let chain_id = Chain_id.of_block_hash genesis.Genesis.block in - let dst_store_dir = Naming.store_dir ~dir_path:dst_store_dir in - let dst_protocol_dir = Naming.protocol_store_dir dst_store_dir in - let dst_chain_store_dir = Naming.chain_dir dst_store_dir chain_id in - let dst_cemented_dir = Naming.cemented_blocks_dir dst_chain_store_dir in - Lwt_list.iter_s - (Lwt_utils_unix.create_dir ~perm:0o755) - [ - Naming.dir_path dst_store_dir; - Naming.dir_path dst_protocol_dir; - Naming.dir_path dst_chain_store_dir; - Naming.dir_path dst_cemented_dir; - ] - >>= fun () -> - Context.init ~readonly:false ?patch_context dst_context_dir - >>= fun context_index -> - Lwt.finalize - (fun () -> - (* Start by commiting genesis in the context *) - Context.commit_genesis - context_index - ~chain_id - ~time:genesis.Genesis.time - ~protocol:genesis.protocol - >>=? fun genesis_context_hash -> - let cycle_length = Legacy.Hardcoded.cycle_length ~chain_name in - let floating_blocks = ref [] in - let current_blocks = ref [] in - let has_reached_cemented = ref false in - let partial_protocol_levels = ref [] in - let genesis_block = - Block_repr.create_genesis_block ~genesis genesis_context_hash - in - Cemented_block_store.init - ~log_size:cemented_import_log_size - ~readonly:false - dst_chain_store_dir - >>=? fun cemented_store -> - Lwt.finalize - (fun () -> - let handle_block snapshot_history_mode = - let is_rolling = - snapshot_history_mode = History_mode.Legacy.Rolling - in - fun ((hash : Block_hash.t), (block : Context.Pruned_block_legacy.t)) -> - (let proj (hash, (block : Context.Pruned_block_legacy.t)) = - let contents = - { - Block_repr.header = block.block_header; - operations = - List.rev_map (fun (_, l) -> l) block.operations; - (* TODO: incorporate the metadata hashes in a new format *) - block_metadata_hash = None; - operations_metadata_hashes = None; - } - in - {Block_repr.hash; contents; metadata = None} - in - let block = proj (hash, block) in - (* Blocks are stored in reverse order in legacy snapshots so - consing them puts them back in correct order. *) - if is_rolling then ( - current_blocks := block :: !current_blocks ; - return_unit) - else - (* Full snapshot *) - match Block_repr.level block with - (* Hardcoded special treatment for the first two blocks. *) - | 0l -> (* No genesis in previous format *) assert false - | 1l -> - (* Cement from genesis to this block *) - if !current_blocks <> [] then ( - assert (!floating_blocks = []) ; - current_blocks := !floating_blocks) ; - Cemented_block_store.cement_blocks - ~check_consistency:false - cemented_store - ~write_metadata:false - [genesis_block; block] - | level -> - (* 4 cases : - - in future floating blocks => after the cementing part - - at the end of a cycle - - in the middle of a cycle - - at the dawn of a cycle - *) - let is_end_of_a_cycle = - (* We are shifted by one in every cycles. 
*) - Compare.Int32.equal - 1l - Int32.(rem level (of_int cycle_length)) - in - if is_end_of_a_cycle then ( - if not !has_reached_cemented then ( - has_reached_cemented := true ; - (* All current blocks should be written in floating *) - (* We will write them later on *) - floating_blocks := !current_blocks) ; - (* Start building up the cycle to cement *) - current_blocks := [block] ; - return_unit) - else - let is_dawn_of_a_cycle = - Compare.Int32.equal - 2l - Int32.(rem level (of_int cycle_length)) - in - if is_dawn_of_a_cycle && !has_reached_cemented then ( - (* Cycle is complete, cement it *) - Cemented_block_store.cement_blocks - ~check_consistency:false - cemented_store - ~write_metadata:false - (block :: !current_blocks) - >>=? fun () -> - current_blocks := [] ; - return_unit) - else ( - current_blocks := block :: !current_blocks ; - return_unit)) - >>=? fun () -> return_unit - in - let handle_protocol_data (transition_level, protocol) = - let open Context.Protocol_data_legacy in - let open Protocol_levels in - let { - info = {author; message; _}; - protocol_hash; - test_chain_status; - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - data_key; - parents; - } = - protocol - in - let commit_info = - { - author; - message; - test_chain_status; - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - data_merkle_root = data_key; - parents_contexts = parents; - } - in - partial_protocol_levels := - (transition_level, protocol_hash, Some commit_info) - :: !partial_protocol_levels ; - return_unit - in - (* Restore context and fetch data *) - Context.legacy_restore_context - ?expected_block: - (Option.map (fun b -> Block_hash.to_b58check b) expected_block) - context_index - ~snapshot_file - ~handle_block - ~handle_protocol_data - ~block_validation:legacy_block_validation) - (fun () -> - Cemented_block_store.close cemented_store ; - Lwt.return_unit) - >>=? fun ( predecessor_block_header, - block_data, - predecessor_block_metadata_hash, - predecessor_ops_metadata_hashes, - _oldest_header_opt, - legacy_history_mode ) -> - let history_mode = History_mode.convert legacy_history_mode in - (* Floating blocks should be initialized now *) - let floating_blocks = - if not !has_reached_cemented then !current_blocks else !floating_blocks - in - (* Apply pred block *) - let pred_context_hash = predecessor_block_header.shell.context in - (Context.checkout context_index pred_context_hash >>= function - | Some ctxt -> return ctxt - | None -> - fail - (Cannot_checkout_context - (Block_header.hash predecessor_block_header, pred_context_hash))) - >>=? 
fun predecessor_context -> - let {Context.Block_data_legacy.block_header; operations} = block_data in - let predecessor_ops_metadata_hash = - Option.map - (fun ll -> - Operation_metadata_list_list_hash.compute - (List.map Operation_metadata_list_hash.compute ll)) - predecessor_ops_metadata_hashes - in - let apply_environment = - { - Block_validation.max_operations_ttl = - Int32.to_int predecessor_block_header.shell.level; - chain_id; - predecessor_block_header; - predecessor_context; - predecessor_block_metadata_hash; - predecessor_ops_metadata_hash; - user_activated_upgrades; - user_activated_protocol_overrides; - } - in - (Block_validation.apply - apply_environment - block_header - operations - ~cache:`Lazy - >>= function - | Ok block_validation_result -> return block_validation_result - | Error errs -> - Format.kasprintf - (fun errs -> - fail - (Target_block_validation_failed - (Block_header.hash block_header, errs))) - "%a" - pp_print_trace - errs) - >>=? fun {result = block_validation_result; _} -> - check_context_hash_consistency_legacy - block_validation_result.validation_store - block_header - >>=? fun () -> - let { - Block_validation.validation_store; - block_metadata; - ops_metadata; - block_metadata_hash; - ops_metadata_hashes = operations_metadata_hashes; - } = - block_validation_result - in - let contents = - { - Block_repr.header = block_header; - operations; - block_metadata_hash; - operations_metadata_hashes; - } - in - let { - Block_validation.message; - max_operations_ttl; - last_allowed_fork_level; - _; - } = - validation_store - in - let metadata = - Some - { - Block_repr.message; - max_operations_ttl; - last_allowed_fork_level; - block_metadata; - operations_metadata = ops_metadata; - } - in - let new_head_with_metadata = - ({hash = Block_header.hash block_header; contents; metadata} - : Block_repr.block) - in - (* Append the new head along with the floating blocks *) - Animation.display_progress - ~every:100 - ~pp_print_step:(fun fmt i -> - Format.fprintf fmt "Storing floating blocks: %d blocks written" i) - (fun notify -> - Store.Unsafe.restore_from_legacy_snapshot - ~notify - dst_store_dir - ~context_index - ~genesis - ~genesis_context_hash - ~floating_blocks_stream:(Lwt_stream.of_list floating_blocks) - ~new_head_with_metadata - ~partial_protocol_levels:!partial_protocol_levels - ~history_mode)) - (fun () -> Context.close context_index) - >>=? 
fun () -> - (* Protocol will be stored next time the store is loaded *) - Event.(emit import_success snapshot_file) >>= fun () -> return_unit + | Invalid -> fail (Inconsistent_snapshot_file snapshot_path) let import ~snapshot_path ?patch_context ?block ?check_consistency ~dst_store_dir ~dst_context_dir ~chain_name ~user_activated_upgrades @@ -3845,15 +3426,4 @@ let import ~snapshot_path ?patch_context ?block ?check_consistency ~user_activated_upgrades ~user_activated_protocol_overrides genesis - | Legacy -> - import_legacy - ?patch_context - ?block - ~snapshot_file:snapshot_path - ~dst_store_dir - ~dst_context_dir - ~chain_name - ~user_activated_upgrades - ~user_activated_protocol_overrides - genesis - | _ -> fail (Inconsistent_snapshot_file snapshot_path) + | Invalid -> fail (Inconsistent_snapshot_file snapshot_path) diff --git a/src/lib_store/snapshots.mli b/src/lib_store/snapshots.mli index 373781219c21..9b99bfbe0759 100644 --- a/src/lib_store/snapshots.mli +++ b/src/lib_store/snapshots.mli @@ -148,7 +148,7 @@ val pp_snapshot_format : Format.formatter -> snapshot_format -> unit val snapshot_format_encoding : snapshot_format Data_encoding.t -type snapshot_kind = Current of snapshot_format | Legacy | Invalid +type snapshot_kind = Current of snapshot_format | Invalid type snapshot_header @@ -210,28 +210,3 @@ val import : returns its kind. Returns [Invalid] if it is a wrong snapshot file. *) val snapshot_file_kind : snapshot_path:string -> snapshot_kind tzresult Lwt.t - -(**/**) - -(* Exposed for testing purposes *) - -(** [import_legacy ?patch_context ?block ~dst_store_dir - ~dst_context_dir ~chain_name ~user_activated_upgrades - ~user_activated_protocol_overrides ~snapshot_file genesis] - - same as import but expects [snapshot_file] to be in the format of - version 1. The {!Cemented_block_store} will be artificially - reconstructed using hard-coded values for the most common networks - (e.g. mainnet, carthagenet, sandbox). Fails if the [chain_name] - cannot be associated with one of the hard-coded supported networks. 
*) -val import_legacy : - ?patch_context:(Context.t -> Context.t tzresult Lwt.t) -> - ?block:Block_hash.t -> - snapshot_file:string -> - dst_store_dir:string -> - dst_context_dir:string -> - chain_name:Distributed_db_version.Name.t -> - user_activated_upgrades:User_activated.upgrades -> - user_activated_protocol_overrides:User_activated.protocol_overrides -> - Genesis.t -> - unit tzresult Lwt.t -- GitLab From 59014c76e095fda6c606adf18894dd666f0ea061 Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Fri, 10 Dec 2021 10:40:48 +0100 Subject: [PATCH 4/6] Remove the LMDB vendored library --- vendors/README.md | 9 - vendors/ocaml-lmdb/LICENSE | 47 - vendors/ocaml-lmdb/config/discover.ml | 8 - vendors/ocaml-lmdb/config/dune | 2 - vendors/ocaml-lmdb/dune-project | 2 - vendors/ocaml-lmdb/src/dune | 14 - vendors/ocaml-lmdb/src/lmdb.h | 1647 ---- vendors/ocaml-lmdb/src/lmdb.ml | 648 -- vendors/ocaml-lmdb/src/lmdb.mli | 255 - vendors/ocaml-lmdb/src/lmdb_stubs.c | 569 -- vendors/ocaml-lmdb/src/mdb.c | 11153 ------------------------ vendors/ocaml-lmdb/src/midl.c | 421 - vendors/ocaml-lmdb/src/midl.h | 204 - vendors/ocaml-lmdb/test/dune | 8 - vendors/ocaml-lmdb/test/test.ml | 183 - vendors/ocaml-lmdb/tezos-lmdb.opam | 20 - 16 files changed, 15190 deletions(-) delete mode 100644 vendors/ocaml-lmdb/LICENSE delete mode 100644 vendors/ocaml-lmdb/config/discover.ml delete mode 100644 vendors/ocaml-lmdb/config/dune delete mode 100644 vendors/ocaml-lmdb/dune-project delete mode 100644 vendors/ocaml-lmdb/src/dune delete mode 100644 vendors/ocaml-lmdb/src/lmdb.h delete mode 100644 vendors/ocaml-lmdb/src/lmdb.ml delete mode 100644 vendors/ocaml-lmdb/src/lmdb.mli delete mode 100644 vendors/ocaml-lmdb/src/lmdb_stubs.c delete mode 100644 vendors/ocaml-lmdb/src/mdb.c delete mode 100644 vendors/ocaml-lmdb/src/midl.c delete mode 100644 vendors/ocaml-lmdb/src/midl.h delete mode 100644 vendors/ocaml-lmdb/test/dune delete mode 100644 vendors/ocaml-lmdb/test/test.ml delete mode 100644 vendors/ocaml-lmdb/tezos-lmdb.opam diff --git a/vendors/README.md b/vendors/README.md index 6eb97f78276d..7348169947cc 100644 --- a/vendors/README.md +++ b/vendors/README.md @@ -80,15 +80,6 @@ as merge-requests. #### Differences with the initial library Unknown -### ocaml-lmdb - -- ocaml-lmdb are simple OCaml bindings to Lightning Memory-Mapped Database from - Symas. -- Cloned from: [ocaml-lmdb](https://github.com/vbmithr/ocaml-lmdb) but evolved - since. - + The original C code is from [lmdb](https://github.com/LMDB/lmdb) -- It was vendored for fast development. - #### Differences with the initial library The ocaml bindings are slowly evolving to serve Tezos. diff --git a/vendors/ocaml-lmdb/LICENSE b/vendors/ocaml-lmdb/LICENSE deleted file mode 100644 index 05ad7571e448..000000000000 --- a/vendors/ocaml-lmdb/LICENSE +++ /dev/null @@ -1,47 +0,0 @@ -The OpenLDAP Public License - Version 2.8, 17 August 2003 - -Redistribution and use of this software and associated documentation -("Software"), with or without modification, are permitted provided -that the following conditions are met: - -1. Redistributions in source form must retain copyright statements - and notices, - -2. Redistributions in binary form must reproduce applicable copyright - statements and notices, this list of conditions, and the following - disclaimer in the documentation and/or other materials provided - with the distribution, and - -3. Redistributions must contain a verbatim copy of this document. - -The OpenLDAP Foundation may revise this license from time to time. 
-Each revision is distinguished by a version number. You may use -this Software under terms of this license revision or under the -terms of any subsequent revision of the license. - -THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS -CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S) -OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -The names of the authors and copyright holders must not be used in -advertising or otherwise to promote the sale, use or other dealing -in this Software without specific, written prior permission. Title -to copyright in this Software shall at all times remain with copyright -holders. - -OpenLDAP is a registered trademark of the OpenLDAP Foundation. - -Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, -California, USA. All Rights Reserved. Permission to copy and -distribute verbatim copies of this document is granted. diff --git a/vendors/ocaml-lmdb/config/discover.ml b/vendors/ocaml-lmdb/config/discover.ml deleted file mode 100644 index 1b0496ec9873..000000000000 --- a/vendors/ocaml-lmdb/config/discover.ml +++ /dev/null @@ -1,8 +0,0 @@ -let () = - let oc = open_out "c_flags.sexp" in - let w = "-W -Wall -Wno-unused-parameter -Wbad-function-cast -Wuninitialized" in - let thread = "-pthread" in - let opt = "-O2 -g" in - Printf.fprintf oc "(%s %s %s %s)" w thread opt - (if Sys.word_size = 32 then "-DMDB_VL32" else "") ; - close_out oc diff --git a/vendors/ocaml-lmdb/config/dune b/vendors/ocaml-lmdb/config/dune deleted file mode 100644 index 9d7ae488683c..000000000000 --- a/vendors/ocaml-lmdb/config/dune +++ /dev/null @@ -1,2 +0,0 @@ -(executable - (name discover)) diff --git a/vendors/ocaml-lmdb/dune-project b/vendors/ocaml-lmdb/dune-project deleted file mode 100644 index 448ab1555e58..000000000000 --- a/vendors/ocaml-lmdb/dune-project +++ /dev/null @@ -1,2 +0,0 @@ -(lang dune 2.0) -(name tezos-lmdb) diff --git a/vendors/ocaml-lmdb/src/dune b/vendors/ocaml-lmdb/src/dune deleted file mode 100644 index 48127d75ae8c..000000000000 --- a/vendors/ocaml-lmdb/src/dune +++ /dev/null @@ -1,14 +0,0 @@ -(library - (name lmdb) - (public_name tezos-lmdb) - (libraries rresult) - (foreign_stubs - (language c) - (names mdb midl lmdb_stubs) - (flags (:standard -Wstringop-overflow=0) (:include c_flags.sexp))) - (c_library_flags -lpthread) - ) - -(rule - (targets c_flags.sexp) - (action (run %{exe:../config/discover.exe} -ocamlc %{ocamlc}))) diff --git a/vendors/ocaml-lmdb/src/lmdb.h b/vendors/ocaml-lmdb/src/lmdb.h deleted file mode 100644 index 1fa11fd54e73..000000000000 --- a/vendors/ocaml-lmdb/src/lmdb.h +++ /dev/null @@ -1,1647 +0,0 @@ -/** @file lmdb.h - * @brief Lightning memory-mapped database library - * - * @mainpage Lightning Memory-Mapped Database Manager (LMDB) - * - * @section intro_sec Introduction - * LMDB is a Btree-based database management library modeled loosely on the - * BerkeleyDB API, 
but much simplified. The entire database is exposed - * in a memory map, and all data fetches return data directly - * from the mapped memory, so no malloc's or memcpy's occur during - * data fetches. As such, the library is extremely simple because it - * requires no page caching layer of its own, and it is extremely high - * performance and memory-efficient. It is also fully transactional with - * full ACID semantics, and when the memory map is read-only, the - * database integrity cannot be corrupted by stray pointer writes from - * application code. - * - * The library is fully thread-aware and supports concurrent read/write - * access from multiple processes and threads. Data pages use a copy-on- - * write strategy so no active data pages are ever overwritten, which - * also provides resistance to corruption and eliminates the need of any - * special recovery procedures after a system crash. Writes are fully - * serialized; only one write transaction may be active at a time, which - * guarantees that writers can never deadlock. The database structure is - * multi-versioned so readers run with no locks; writers cannot block - * readers, and readers don't block writers. - * - * Unlike other well-known database mechanisms which use either write-ahead - * transaction logs or append-only data writes, LMDB requires no maintenance - * during operation. Both write-ahead loggers and append-only databases - * require periodic checkpointing and/or compaction of their log or database - * files otherwise they grow without bound. LMDB tracks free pages within - * the database and re-uses them for new write operations, so the database - * size does not grow without bound in normal use. - * - * The memory map can be used as a read-only or read-write map. It is - * read-only by default as this provides total immunity to corruption. - * Using read-write mode offers much higher write performance, but adds - * the possibility for stray application writes thru pointers to silently - * corrupt the database. Of course if your application code is known to - * be bug-free (...) then this is not an issue. - * - * If this is your first time using a transactional embedded key/value - * store, you may find the \ref starting page to be helpful. - * - * @section caveats_sec Caveats - * Troubleshooting the lock file, plus semaphores on BSD systems: - * - * - A broken lockfile can cause sync issues. - * Stale reader transactions left behind by an aborted program - * cause further writes to grow the database quickly, and - * stale locks can block further operation. - * - * Fix: Check for stale readers periodically, using the - * #mdb_reader_check function or the \ref mdb_stat_1 "mdb_stat" tool. - * Stale writers will be cleared automatically on most systems: - * - Windows - automatic - * - BSD, systems using SysV semaphores - automatic - * - Linux, systems using POSIX mutexes with Robust option - automatic - * Otherwise just make all programs using the database close it; - * the lockfile is always reset on first open of the environment. - * - * - On BSD systems or others configured with MDB_USE_SYSV_SEM or - * MDB_USE_POSIX_SEM, - * startup can fail due to semaphores owned by another userid. - * - * Fix: Open and close the database as the user which owns the - * semaphores (likely last user) or as root, while no other - * process is using the database. 
- * - * Restrictions/caveats (in addition to those listed for some functions): - * - * - Only the database owner should normally use the database on - * BSD systems or when otherwise configured with MDB_USE_POSIX_SEM. - * Multiple users can cause startup to fail later, as noted above. - * - * - There is normally no pure read-only mode, since readers need write - * access to locks and lock file. Exceptions: On read-only filesystems - * or with the #MDB_NOLOCK flag described under #mdb_env_open(). - * - * - An LMDB configuration will often reserve considerable \b unused - * memory address space and maybe file size for future growth. - * This does not use actual memory or disk space, but users may need - * to understand the difference so they won't be scared off. - * - * - By default, in versions before 0.9.10, unused portions of the data - * file might receive garbage data from memory freed by other code. - * (This does not happen when using the #MDB_WRITEMAP flag.) As of - * 0.9.10 the default behavior is to initialize such memory before - * writing to the data file. Since there may be a slight performance - * cost due to this initialization, applications may disable it using - * the #MDB_NOMEMINIT flag. Applications handling sensitive data - * which must not be written should not use this flag. This flag is - * irrelevant when using #MDB_WRITEMAP. - * - * - A thread can only use one transaction at a time, plus any child - * transactions. Each transaction belongs to one thread. See below. - * The #MDB_NOTLS flag changes this for read-only transactions. - * - * - Use an MDB_env* in the process which opened it, not after fork(). - * - * - Do not have open an LMDB database twice in the same process at - * the same time. Not even from a plain open() call - close()ing it - * breaks fcntl() advisory locking. (It is OK to reopen it after - * fork() - exec*(), since the lockfile has FD_CLOEXEC set.) - * - * - Avoid long-lived transactions. Read transactions prevent - * reuse of pages freed by newer write transactions, thus the - * database can grow quickly. Write transactions prevent - * other write transactions, since writes are serialized. - * - * - Avoid suspending a process with active transactions. These - * would then be "long-lived" as above. Also read transactions - * suspended when writers commit could sometimes see wrong data. - * - * ...when several processes can use a database concurrently: - * - * - Avoid aborting a process with an active transaction. - * The transaction becomes "long-lived" as above until a check - * for stale readers is performed or the lockfile is reset, - * since the process may not remove it from the lockfile. - * - * This does not apply to write transactions if the system clears - * stale writers, see above. - * - * - If you do that anyway, do a periodic check for stale readers. Or - * close the environment once in a while, so the lockfile can get reset. - * - * - Do not use LMDB databases on remote filesystems, even between - * processes on the same host. This breaks flock() on some OSes, - * possibly memory map sync, and certainly sync between programs - * on different hosts. - * - * - Opening a database can fail if another process is opening or - * closing it at exactly the same time. - * - * @author Howard Chu, Symas Corporation. - * - * @copyright Copyright 2011-2018 Howard Chu, Symas Corp. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * <http://www.OpenLDAP.org/license.html>. - * - * @par Derived From: - * This code is derived from btree.c written by Martin Hedenfalk. - * - * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -#ifndef _LMDB_H_ -#define _LMDB_H_ - -#include <sys/types.h> -#include <inttypes.h> -#include <limits.h> - -#ifdef __cplusplus -extern "C" { -#endif - -/** Unix permissions for creating files, or dummy definition for Windows */ -#ifdef _MSC_VER -typedef int mdb_mode_t; -#else -typedef mode_t mdb_mode_t; -#endif - -#ifdef _WIN32 -# define MDB_FMT_Z "I" -#else -# define MDB_FMT_Z "z" /**< printf/scanf format modifier for size_t */ -#endif - -#ifndef MDB_VL32 -/** Unsigned type used for mapsize, entry counts and page/transaction IDs. - * - * It is normally size_t, hence the name. Defining MDB_VL32 makes it - * uint64_t, but do not try this unless you know what you are doing. - */ -typedef size_t mdb_size_t; -# define MDB_SIZE_MAX SIZE_MAX /**< max #mdb_size_t */ -/** #mdb_size_t printf formats, \b t = one of [diouxX] without quotes */ -# define MDB_PRIy(t) MDB_FMT_Z #t -/** #mdb_size_t scanf formats, \b t = one of [dioux] without quotes */ -# define MDB_SCNy(t) MDB_FMT_Z #t -#else -typedef uint64_t mdb_size_t; -# define MDB_SIZE_MAX UINT64_MAX -# define MDB_PRIy(t) PRI##t##64 -# define MDB_SCNy(t) SCN##t##64 -# define mdb_env_create mdb_env_create_vl32 /**< Prevent mixing with non-VL32 builds */ -#endif - -/** An abstraction for a file handle. - * On POSIX systems file handles are small integers. On Windows - * they're opaque pointers. - */ -#ifdef _WIN32 -typedef void *mdb_filehandle_t; -#else -typedef int mdb_filehandle_t; -#endif - -/** @defgroup mdb LMDB API - * @{ - * @brief OpenLDAP Lightning Memory-Mapped Database Manager - */ -/** @defgroup Version Version Macros - * @{ - */ -/** Library major version */ -#define MDB_VERSION_MAJOR 0 -/** Library minor version */ -#define MDB_VERSION_MINOR 9 -/** Library patch version */ -#define MDB_VERSION_PATCH 70 - -/** Combine args a,b,c into a single integer for easy version comparisons */ -#define MDB_VERINT(a,b,c) (((a) << 24) | ((b) << 16) | (c)) - -/** The full library version as a single integer */ -#define MDB_VERSION_FULL \ - MDB_VERINT(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH) - -/** The release date of this library version */ -#define MDB_VERSION_DATE "December 19, 2015" - -/** A stringifier for the version info */ -#define MDB_VERSTR(a,b,c,d) "LMDB " #a "." #b "." 
#c ": (" d ")" - -/** A helper for the stringifier macro */ -#define MDB_VERFOO(a,b,c,d) MDB_VERSTR(a,b,c,d) - -/** The full library version as a C string */ -#define MDB_VERSION_STRING \ - MDB_VERFOO(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH,MDB_VERSION_DATE) -/** @} */ - -/** @brief Opaque structure for a database environment. - * - * A DB environment supports multiple databases, all residing in the same - * shared-memory map. - */ -typedef struct MDB_env MDB_env; - -/** @brief Opaque structure for a transaction handle. - * - * All database operations require a transaction handle. Transactions may be - * read-only or read-write. - */ -typedef struct MDB_txn MDB_txn; - -/** @brief A handle for an individual database in the DB environment. */ -typedef unsigned int MDB_dbi; - -/** @brief Opaque structure for navigating through a database */ -typedef struct MDB_cursor MDB_cursor; - -/** @brief Generic structure used for passing keys and data in and out - * of the database. - * - * Values returned from the database are valid only until a subsequent - * update operation, or the end of the transaction. Do not modify or - * free them, they commonly point into the database itself. - * - * Key sizes must be between 1 and #mdb_env_get_maxkeysize() inclusive. - * The same applies to data sizes in databases with the #MDB_DUPSORT flag. - * Other data items can in theory be from 0 to 0xffffffff bytes long. - */ -typedef struct MDB_val { - size_t mv_size; /**< size of the data item */ - void *mv_data; /**< address of the data item */ -} MDB_val; - -/** @brief A callback function used to compare two keys in a database */ -typedef int (MDB_cmp_func)(const MDB_val *a, const MDB_val *b); - -/** @brief A callback function used to relocate a position-dependent data item - * in a fixed-address database. - * - * The \b newptr gives the item's desired address in - * the memory map, and \b oldptr gives its previous address. The item's actual - * data resides at the address in \b item. This callback is expected to walk - * through the fields of the record in \b item and modify any - * values based at the \b oldptr address to be relative to the \b newptr address. - * @param[in,out] item The item that is to be relocated. - * @param[in] oldptr The previous address. - * @param[in] newptr The new address to relocate to. - * @param[in] relctx An application-provided context, set by #mdb_set_relctx(). - * @todo This feature is currently unimplemented. 
- */ -typedef void (MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr, void *relctx); - -/** @defgroup mdb_env Environment Flags - * @{ - */ - /** mmap at a fixed address (experimental) */ -#define MDB_FIXEDMAP 0x01 - /** no environment directory */ -#define MDB_NOSUBDIR 0x4000 - /** don't fsync after commit */ -#define MDB_NOSYNC 0x10000 - /** read only */ -#define MDB_RDONLY 0x20000 - /** don't fsync metapage after commit */ -#define MDB_NOMETASYNC 0x40000 - /** use writable mmap */ -#define MDB_WRITEMAP 0x80000 - /** use asynchronous msync when #MDB_WRITEMAP is used */ -#define MDB_MAPASYNC 0x100000 - /** tie reader locktable slots to #MDB_txn objects instead of to threads */ -#define MDB_NOTLS 0x200000 - /** don't do any locking, caller must manage their own locks */ -#define MDB_NOLOCK 0x400000 - /** don't do readahead (no effect on Windows) */ -#define MDB_NORDAHEAD 0x800000 - /** don't initialize malloc'd memory before writing to datafile */ -#define MDB_NOMEMINIT 0x1000000 - /** use the previous meta page rather than the latest one */ -#define MDB_PREVMETA 0x2000000 -/** @} */ - -/** @defgroup mdb_dbi_open Database Flags - * @{ - */ - /** use reverse string keys */ -#define MDB_REVERSEKEY 0x02 - /** use sorted duplicates */ -#define MDB_DUPSORT 0x04 - /** numeric keys in native byte order, either unsigned int or #mdb_size_t. - * (lmdb expects 32-bit int <= size_t <= 32/64-bit mdb_size_t.) - * The keys must all be of the same size. */ -#define MDB_INTEGERKEY 0x08 - /** with #MDB_DUPSORT, sorted dup items have fixed size */ -#define MDB_DUPFIXED 0x10 - /** with #MDB_DUPSORT, dups are #MDB_INTEGERKEY-style integers */ -#define MDB_INTEGERDUP 0x20 - /** with #MDB_DUPSORT, use reverse string dups */ -#define MDB_REVERSEDUP 0x40 - /** create DB if not already existing */ -#define MDB_CREATE 0x40000 -/** @} */ - -/** @defgroup mdb_put Write Flags - * @{ - */ -/** For put: Don't write if the key already exists. */ -#define MDB_NOOVERWRITE 0x10 -/** Only for #MDB_DUPSORT
- * For put: don't write if the key and data pair already exist.
- * For mdb_cursor_del: remove all duplicate data items. - */ -#define MDB_NODUPDATA 0x20 -/** For mdb_cursor_put: overwrite the current key/data pair */ -#define MDB_CURRENT 0x40 -/** For put: Just reserve space for data, don't copy it. Return a - * pointer to the reserved space. - */ -#define MDB_RESERVE 0x10000 -/** Data is being appended, don't split full pages. */ -#define MDB_APPEND 0x20000 -/** Duplicate data is being appended, don't split full pages. */ -#define MDB_APPENDDUP 0x40000 -/** Store multiple data items in one call. Only for #MDB_DUPFIXED. */ -#define MDB_MULTIPLE 0x80000 -/* @} */ - -/** @defgroup mdb_copy Copy Flags - * @{ - */ -/** Compacting copy: Omit free space from copy, and renumber all - * pages sequentially. - */ -#define MDB_CP_COMPACT 0x01 -/* @} */ - -/** @brief Cursor Get operations. - * - * This is the set of all operations for retrieving data - * using a cursor. - */ -typedef enum MDB_cursor_op { - MDB_FIRST, /**< Position at first key/data item */ - MDB_FIRST_DUP, /**< Position at first data item of current key. - Only for #MDB_DUPSORT */ - MDB_GET_BOTH, /**< Position at key/data pair. Only for #MDB_DUPSORT */ - MDB_GET_BOTH_RANGE, /**< position at key, nearest data. Only for #MDB_DUPSORT */ - MDB_GET_CURRENT, /**< Return key/data at current cursor position */ - MDB_GET_MULTIPLE, /**< Return key and up to a page of duplicate data items - from current cursor position. Move cursor to prepare - for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ - MDB_LAST, /**< Position at last key/data item */ - MDB_LAST_DUP, /**< Position at last data item of current key. - Only for #MDB_DUPSORT */ - MDB_NEXT, /**< Position at next data item */ - MDB_NEXT_DUP, /**< Position at next data item of current key. - Only for #MDB_DUPSORT */ - MDB_NEXT_MULTIPLE, /**< Return key and up to a page of duplicate data items - from next cursor position. Move cursor to prepare - for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ - MDB_NEXT_NODUP, /**< Position at first data item of next key */ - MDB_PREV, /**< Position at previous data item */ - MDB_PREV_DUP, /**< Position at previous data item of current key. - Only for #MDB_DUPSORT */ - MDB_PREV_NODUP, /**< Position at last data item of previous key */ - MDB_SET, /**< Position at specified key */ - MDB_SET_KEY, /**< Position at specified key, return key + data */ - MDB_SET_RANGE, /**< Position at first key greater than or equal to specified key. */ - MDB_PREV_MULTIPLE /**< Position at previous page and return key and up to - a page of duplicate data items. 
Only for #MDB_DUPFIXED */ -} MDB_cursor_op; - -/** @defgroup errors Return Codes - * - * BerkeleyDB uses -30800 to -30999, we'll go under them - * @{ - */ - /** Successful result */ -#define MDB_SUCCESS 0 - /** key/data pair already exists */ -#define MDB_KEYEXIST (-30799) - /** key/data pair not found (EOF) */ -#define MDB_NOTFOUND (-30798) - /** Requested page not found - this usually indicates corruption */ -#define MDB_PAGE_NOTFOUND (-30797) - /** Located page was wrong type */ -#define MDB_CORRUPTED (-30796) - /** Update of meta page failed or environment had fatal error */ -#define MDB_PANIC (-30795) - /** Environment version mismatch */ -#define MDB_VERSION_MISMATCH (-30794) - /** File is not a valid LMDB file */ -#define MDB_INVALID (-30793) - /** Environment mapsize reached */ -#define MDB_MAP_FULL (-30792) - /** Environment maxdbs reached */ -#define MDB_DBS_FULL (-30791) - /** Environment maxreaders reached */ -#define MDB_READERS_FULL (-30790) - /** Too many TLS keys in use - Windows only */ -#define MDB_TLS_FULL (-30789) - /** Txn has too many dirty pages */ -#define MDB_TXN_FULL (-30788) - /** Cursor stack too deep - internal error */ -#define MDB_CURSOR_FULL (-30787) - /** Page has not enough space - internal error */ -#define MDB_PAGE_FULL (-30786) - /** Database contents grew beyond environment mapsize */ -#define MDB_MAP_RESIZED (-30785) - /** Operation and DB incompatible, or DB type changed. This can mean: - *
- *  • The operation expects an #MDB_DUPSORT / #MDB_DUPFIXED database.
- *  • Opening a named DB when the unnamed DB has #MDB_DUPSORT / #MDB_INTEGERKEY.
- *  • Accessing a data record as a database, or vice versa.
- *  • The database was dropped and recreated with different flags.
- */ -#define MDB_INCOMPATIBLE (-30784) - /** Invalid reuse of reader locktable slot */ -#define MDB_BAD_RSLOT (-30783) - /** Transaction must abort, has a child, or is invalid */ -#define MDB_BAD_TXN (-30782) - /** Unsupported size of key/DB name/data, or wrong DUPFIXED size */ -#define MDB_BAD_VALSIZE (-30781) - /** The specified DBI was changed unexpectedly */ -#define MDB_BAD_DBI (-30780) - /** Unexpected problem - txn should abort */ -#define MDB_PROBLEM (-30779) - /** The last defined error code */ -#define MDB_LAST_ERRCODE MDB_PROBLEM -/** @} */ - -/** @brief Statistics for a database in the environment */ -typedef struct MDB_stat { - unsigned int ms_psize; /**< Size of a database page. - This is currently the same for all databases. */ - unsigned int ms_depth; /**< Depth (height) of the B-tree */ - mdb_size_t ms_branch_pages; /**< Number of internal (non-leaf) pages */ - mdb_size_t ms_leaf_pages; /**< Number of leaf pages */ - mdb_size_t ms_overflow_pages; /**< Number of overflow pages */ - mdb_size_t ms_entries; /**< Number of data items */ -} MDB_stat; - -/** @brief Information about the environment */ -typedef struct MDB_envinfo { - void *me_mapaddr; /**< Address of map, if fixed */ - mdb_size_t me_mapsize; /**< Size of the data memory map */ - mdb_size_t me_last_pgno; /**< ID of the last used page */ - mdb_size_t me_last_txnid; /**< ID of the last committed transaction */ - unsigned int me_maxreaders; /**< max reader slots in the environment */ - unsigned int me_numreaders; /**< max reader slots used in the environment */ -} MDB_envinfo; - - /** @brief Return the LMDB library version information. - * - * @param[out] major if non-NULL, the library major version number is copied here - * @param[out] minor if non-NULL, the library minor version number is copied here - * @param[out] patch if non-NULL, the library patch version number is copied here - * @retval "version string" The library version as a string - */ -char *mdb_version(int *major, int *minor, int *patch); - - /** @brief Return a string describing a given error code. - * - * This function is a superset of the ANSI C X3.159-1989 (ANSI C) strerror(3) - * function. If the error code is greater than or equal to 0, then the string - * returned by the system function strerror(3) is returned. If the error code - * is less than 0, an error string corresponding to the LMDB library error is - * returned. See @ref errors for a list of LMDB-specific error codes. - * @param[in] err The error code - * @retval "error message" The description of the error - */ -char *mdb_strerror(int err); - - /** @brief Create an LMDB environment handle. - * - * This function allocates memory for a #MDB_env structure. To release - * the allocated memory and discard the handle, call #mdb_env_close(). - * Before the handle may be used, it must be opened using #mdb_env_open(). - * Various other options may also need to be set before opening the handle, - * e.g. #mdb_env_set_mapsize(), #mdb_env_set_maxreaders(), #mdb_env_set_maxdbs(), - * depending on usage requirements. - * @param[out] env The address where the new handle will be stored - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_create(MDB_env **env); - - /** @brief Open an environment handle. - * - * If this function fails, #mdb_env_close() must be called to discard the #MDB_env handle. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] path The directory in which the database files reside. 
This - * directory must already exist and be writable. - * @param[in] flags Special options for this environment. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here; a brief usage sketch appears at the end of - * this description. - * Flags set by mdb_env_set_flags() are also used. - *
- *  • #MDB_FIXEDMAP - * use a fixed address for the mmap region. This flag must be specified - * when creating the environment, and is stored persistently in the environment. - * If successful, the memory map will always reside at the same virtual address - * and pointers used to reference data items in the database will be constant - * across multiple invocations. This option may not always work, depending on - * how the operating system has allocated memory to shared libraries and other uses. - * The feature is highly experimental.
- *  • #MDB_NOSUBDIR - * By default, LMDB creates its environment in a directory whose - * pathname is given in \b path, and creates its data and lock files - * under that directory. With this option, \b path is used as-is for - * the database main data file. The database lock file is the \b path - * with "-lock" appended.
- *  • #MDB_RDONLY - * Open the environment in read-only mode. No write operations will be - * allowed. LMDB will still modify the lock file - except on read-only - * filesystems, where LMDB does not use locks.
- *  • #MDB_WRITEMAP - * Use a writeable memory map unless MDB_RDONLY is set. This uses - * fewer mallocs but loses protection from application bugs - * like wild pointer writes and other bad updates into the database. - * This may be slightly faster for DBs that fit entirely in RAM, but - * is slower for DBs larger than RAM. - * Incompatible with nested transactions. - * Do not mix processes with and without MDB_WRITEMAP on the same - * environment. This can defeat durability (#mdb_env_sync etc).
- *  • #MDB_NOMETASYNC - * Flush system buffers to disk only once per transaction, omit the - * metadata flush. Defer that until the system flushes files to disk, - * or next non-MDB_RDONLY commit or #mdb_env_sync(). This optimization - * maintains database integrity, but a system crash may undo the last - * committed transaction. I.e. it preserves the ACI (atomicity, - * consistency, isolation) but not D (durability) database property. - * This flag may be changed at any time using #mdb_env_set_flags().
- *  • #MDB_NOSYNC - * Don't flush system buffers to disk when committing a transaction. - * This optimization means a system crash can corrupt the database or - * lose the last transactions if buffers are not yet flushed to disk. - * The risk is governed by how often the system flushes dirty buffers - * to disk and how often #mdb_env_sync() is called. However, if the - * filesystem preserves write order and the #MDB_WRITEMAP flag is not - * used, transactions exhibit ACI (atomicity, consistency, isolation) - * properties and only lose D (durability). I.e. database integrity - * is maintained, but a system crash may undo the final transactions. - * Note that (#MDB_NOSYNC | #MDB_WRITEMAP) leaves the system with no - * hint for when to write transactions to disk, unless #mdb_env_sync() - * is called. (#MDB_MAPASYNC | #MDB_WRITEMAP) may be preferable. - * This flag may be changed at any time using #mdb_env_set_flags().
- *  • #MDB_MAPASYNC - * When using #MDB_WRITEMAP, use asynchronous flushes to disk. - * As with #MDB_NOSYNC, a system crash can then corrupt the - * database or lose the last transactions. Calling #mdb_env_sync() - * ensures on-disk database integrity until next commit. - * This flag may be changed at any time using #mdb_env_set_flags().
- *  • #MDB_NOTLS - * Don't use Thread-Local Storage. Tie reader locktable slots to - * #MDB_txn objects instead of to threads. I.e. #mdb_txn_reset() keeps - * the slot reserved for the #MDB_txn object. A thread may use parallel - * read-only transactions. A read-only transaction may span threads if - * the user synchronizes its use. Applications that multiplex many - * user threads over individual OS threads need this option. Such an - * application must also serialize the write transactions in an OS - * thread, since LMDB's write locking is unaware of the user threads.
- *  • #MDB_NOLOCK - * Don't do any locking. If concurrent access is anticipated, the - * caller must manage all concurrency itself. For proper operation - * the caller must enforce single-writer semantics, and must ensure - * that no readers are using old transactions while a writer is - * active. The simplest approach is to use an exclusive lock so that - * no readers may be active at all when a writer begins.
- *  • #MDB_NORDAHEAD - * Turn off readahead. Most operating systems perform readahead on - * read requests by default. This option turns it off if the OS - * supports it. Turning it off may help random read performance - * when the DB is larger than RAM and system RAM is full. - * The option is not implemented on Windows.
- *  • #MDB_NOMEMINIT - * Don't initialize malloc'd memory before writing to unused spaces - * in the data file. By default, memory for pages written to the data - * file is obtained using malloc. While these pages may be reused in - * subsequent transactions, freshly malloc'd pages will be initialized - * to zeroes before use. This avoids persisting leftover data from other - * code (that used the heap and subsequently freed the memory) into the - * data file. Note that many other system libraries may allocate - * and free memory from the heap for arbitrary uses. E.g., stdio may - * use the heap for file I/O buffers. This initialization step has a - * modest performance cost so some applications may want to disable - * it using this flag. This option can be a problem for applications - * which handle sensitive data like passwords, and it makes memory - * checkers like Valgrind noisy. This flag is not needed with #MDB_WRITEMAP, - * which writes directly to the mmap instead of using malloc for pages. The - * initialization is also skipped if #MDB_RESERVE is used; the - * caller is expected to overwrite all of the memory that was - * reserved in that case. - * This flag may be changed at any time using #mdb_env_set_flags().
- *  • #MDB_PREVMETA - * Open the environment with the previous meta page rather than the latest - * one. This loses the latest transaction, but may help work around some - * types of corruption.
- * @param[in] mode The UNIX permissions to set on created files and semaphores. - * This parameter is ignored on Windows. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • #MDB_VERSION_MISMATCH - the version of the LMDB library doesn't match the - * version that created the database environment.
- *  • #MDB_INVALID - the environment file headers are corrupted.
- *  • ENOENT - the directory specified by the path parameter doesn't exist.
- *  • EACCES - the user didn't have permission to access the environment files.
- *  • EAGAIN - the environment was locked by another process.
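 - *
 - * For illustration, a minimal open sequence (a non-normative sketch;
 - * "./testdb" is a placeholder path and error handling is abbreviated):
 - *
 - *   MDB_env *env;
 - *   int rc = mdb_env_create(&env);
 - *   if (rc == MDB_SUCCESS)
 - *     rc = mdb_env_open(env, "./testdb", MDB_NOSUBDIR, 0664);
 - *   if (rc != MDB_SUCCESS)
 - *     mdb_env_close(env);    (the handle must be discarded on failure)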
- */ -int mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode); - - /** @brief Copy an LMDB environment to the specified path. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] path The directory in which the copy will reside. This - * directory must already exist and be writable but must otherwise be - * empty. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copy(MDB_env *env, const char *path); - - /** @brief Copy an LMDB environment to the specified file descriptor. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] fd The filedescriptor to write the copy to. It must - * have already been opened for Write access. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copyfd(MDB_env *env, mdb_filehandle_t fd); - - /** @brief Copy an LMDB environment to the specified path, with options. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] path The directory in which the copy will reside. This - * directory must already exist and be writable but must otherwise be - * empty. - * @param[in] flags Special options for this operation. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - *
    - *
  • #MDB_CP_COMPACT - Perform compaction while copying: omit free - * pages and sequentially renumber all pages in output. This option - * consumes more CPU and runs more slowly than the default. - * Currently it fails if the environment has suffered a page leak. - *
- * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags); - - /** @brief Copy an LMDB environment to the specified file descriptor, - * with options. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. See - * #mdb_env_copy2() for further details. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] fd The filedescriptor to write the copy to. It must - * have already been opened for Write access. - * @param[in] flags Special options for this operation. - * See #mdb_env_copy2() for options. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copyfd2(MDB_env *env, mdb_filehandle_t fd, unsigned int flags); - - /** @brief Return statistics about the LMDB environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] stat The address of an #MDB_stat structure - * where the statistics will be copied - */ -int mdb_env_stat(MDB_env *env, MDB_stat *stat); - - /** @brief Return information about the LMDB environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] stat The address of an #MDB_envinfo structure - * where the information will be copied - */ -int mdb_env_info(MDB_env *env, MDB_envinfo *stat); - - /** @brief Flush the data buffers to disk. - * - * Data is always written to disk when #mdb_txn_commit() is called, - * but the operating system may keep it buffered. LMDB always flushes - * the OS buffers upon commit as well, unless the environment was - * opened with #MDB_NOSYNC or in part #MDB_NOMETASYNC. This call is - * not valid if the environment was opened with #MDB_RDONLY. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] force If non-zero, force a synchronous flush. Otherwise - * if the environment has the #MDB_NOSYNC flag set the flushes - * will be omitted, and with #MDB_MAPASYNC they will be asynchronous. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EACCES - the environment is read-only. - *
  • EINVAL - an invalid parameter was specified. - *
  • EIO - an error occurred during synchronization. - *
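 - *
 - * For illustration: with an environment opened with #MDB_NOSYNC, an
 - * explicit durable flush can be requested after a batch of commits
 - * (a sketch; [env] is assumed to be an already-opened handle):
 - *
 - *   int rc = mdb_env_sync(env, 1);    force a synchronous flush now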
- */ -int mdb_env_sync(MDB_env *env, int force); - - /** @brief Close the environment and release the memory map. - * - * Only a single thread may call this function. All transactions, databases, - * and cursors must already be closed before calling this function. Attempts to - * use any such handles after calling this function will cause a SIGSEGV. - * The environment handle will be freed and must not be used again after this call. - * @param[in] env An environment handle returned by #mdb_env_create() - */ -void mdb_env_close(MDB_env *env); - - /** @brief Set environment flags. - * - * This may be used to set some flags in addition to those from - * #mdb_env_open(), or to unset these flags. If several threads - * change the flags at the same time, the result is undefined. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] flags The flags to change, bitwise OR'ed together - * @param[in] onoff A non-zero value sets the flags, zero clears them. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • EINVAL - an invalid parameter was specified.
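 - *
 - * For illustration, a sketch of toggling #MDB_NOSYNC at runtime around a
 - * bulk load ([env] is assumed to be an already-opened handle):
 - *
 - *   mdb_env_set_flags(env, MDB_NOSYNC, 1);    commits skip the flush
 - *   ... perform the bulk load ...
 - *   mdb_env_set_flags(env, MDB_NOSYNC, 0);    restore durable commits
 - *   mdb_env_sync(env, 1);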
- */ -int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff); - - /** @brief Get environment flags. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] flags The address of an integer to store the flags - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • EINVAL - an invalid parameter was specified.
- */ -int mdb_env_get_flags(MDB_env *env, unsigned int *flags); - - /** @brief Return the path that was used in #mdb_env_open(). - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] path Address of a string pointer to contain the path. This - * is the actual string in the environment, not a copy. It should not be - * altered in any way. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • EINVAL - an invalid parameter was specified.
- */ -int mdb_env_get_path(MDB_env *env, const char **path); - - /** @brief Return the filedescriptor for the given environment. - * - * This function may be called after fork(), so the descriptor can be - * closed before exec*(). Other LMDB file descriptors have FD_CLOEXEC. - * (Until LMDB 0.9.18, only the lockfile had that.) - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] fd Address of a mdb_filehandle_t to contain the descriptor. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • EINVAL - an invalid parameter was specified.
- */ -int mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *fd); - - /** @brief Set the size of the memory map to use for this environment. - * - * The size should be a multiple of the OS page size. The default is - * 10485760 bytes. The size of the memory map is also the maximum size - * of the database. The value should be chosen as large as possible, - * to accommodate future growth of the database. - * This function should be called after #mdb_env_create() and before #mdb_env_open(). - * It may be called at later times if no transactions are active in - * this process. Note that the library does not check for this condition, - * the caller must ensure it explicitly. - * - * The new size takes effect immediately for the current process but - * will not be persisted to any others until a write transaction has been - * committed by the current process. Also, only mapsize increases are - * persisted into the environment. - * - * If the mapsize is increased by another process, and data has grown - * beyond the range of the current mapsize, #mdb_txn_begin() will - * return #MDB_MAP_RESIZED. This function may be called with a size - * of zero to adopt the new size. - * - * Any attempt to set a size smaller than the space already consumed - * by the environment will be silently changed to the current size of the used space. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] size The size in bytes - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • EINVAL - an invalid parameter was specified, or the environment has - * an active write transaction.
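 - *
 - * A sketch of the intended call order (the size and path below are
 - * placeholders):
 - *
 - *   MDB_env *env;
 - *   mdb_env_create(&env);
 - *   mdb_env_set_mapsize(env, (mdb_size_t)1 << 30);    1 GiB map
 - *   mdb_env_open(env, "./testdb", 0, 0664);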
- */ -int mdb_env_set_mapsize(MDB_env *env, mdb_size_t size); - - /** @brief Set the maximum number of threads/reader slots for the environment. - * - * This defines the number of slots in the lock table that is used to track readers in - * the environment. The default is 126. - * Starting a read-only transaction normally ties a lock table slot to the - * current thread until the environment closes or the thread exits. If - * MDB_NOTLS is in use, #mdb_txn_begin() instead ties the slot to the - * MDB_txn object until it or the #MDB_env object is destroyed. - * This function may only be called after #mdb_env_create() and before #mdb_env_open(). - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] readers The maximum number of reader lock table slots - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • EINVAL - an invalid parameter was specified, or the environment is already open.
- */ -int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers); - - /** @brief Get the maximum number of threads/reader slots for the environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] readers Address of an integer to store the number of readers - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • EINVAL - an invalid parameter was specified.
- */ -int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers); - - /** @brief Set the maximum number of named databases for the environment. - * - * This function is only needed if multiple databases will be used in the - * environment. Simpler applications that use the environment as a single - * unnamed database can ignore this option. - * This function may only be called after #mdb_env_create() and before #mdb_env_open(). - * - * Currently a moderate number of slots are cheap but a huge number gets - * expensive: 7-120 words per transaction, and every #mdb_dbi_open() - * does a linear search of the opened slots. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] dbs The maximum number of databases - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
- *  • EINVAL - an invalid parameter was specified, or the environment is already open.
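 - *
 - * A sketch of a typical setup with named databases (the limit, path and
 - * database name below are placeholders):
 - *
 - *   MDB_env *env; MDB_txn *txn; MDB_dbi dbi;
 - *   mdb_env_create(&env);
 - *   mdb_env_set_maxdbs(env, 4);
 - *   mdb_env_open(env, "./testdb", 0, 0664);
 - *   mdb_txn_begin(env, NULL, 0, &txn);
 - *   mdb_dbi_open(txn, "mydb", MDB_CREATE, &dbi);
 - *   mdb_txn_commit(txn);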
- */ -int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs); - - /** @brief Get the maximum size of keys and #MDB_DUPSORT data we can write. - * - * Depends on the compile-time constant #MDB_MAXKEYSIZE. Default 511. - * See @ref MDB_val. - * @param[in] env An environment handle returned by #mdb_env_create() - * @return The maximum size of a key we can write - */ -int mdb_env_get_maxkeysize(MDB_env *env); - - /** @brief Set application information associated with the #MDB_env. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] ctx An arbitrary pointer for whatever the application needs. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_set_userctx(MDB_env *env, void *ctx); - - /** @brief Get the application information associated with the #MDB_env. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @return The pointer set by #mdb_env_set_userctx(). - */ -void *mdb_env_get_userctx(MDB_env *env); - - /** @brief A callback function for most LMDB assert() failures, - * called before printing the message and aborting. - * - * @param[in] env An environment handle returned by #mdb_env_create(). - * @param[in] msg The assertion message, not including newline. - */ -typedef void MDB_assert_func(MDB_env *env, const char *msg); - - /** Set or reset the assert() callback of the environment. - * Disabled if liblmdb is built with NDEBUG. - * @note This hack should become obsolete as lmdb's error handling matures. - * @param[in] env An environment handle returned by #mdb_env_create(). - * @param[in] func An #MDB_assert_func function, or 0. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_set_assert(MDB_env *env, MDB_assert_func *func); - - /** @brief Create a transaction for use with the environment. - * - * The transaction handle may be discarded using #mdb_txn_abort() or #mdb_txn_commit(). - * @note A transaction and its cursors must only be used by a single - * thread, and a thread may only have a single transaction at a time. - * If #MDB_NOTLS is in use, this does not apply to read-only transactions. - * @note Cursors may not span transactions. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] parent If this parameter is non-NULL, the new transaction - * will be a nested transaction, with the transaction indicated by \b parent - * as its parent. Transactions may be nested to any level. A parent - * transaction and its cursors may not issue any other operations than - * mdb_txn_commit and mdb_txn_abort while it has active child transactions. - * @param[in] flags Special options for this transaction. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - *
- *  • #MDB_RDONLY - * This transaction will not perform any write operations.
- *  • #MDB_NOSYNC - * Don't flush system buffers to disk when committing this transaction.
- *  • #MDB_NOMETASYNC - * Flush system buffers but omit metadata flush when committing this transaction.
- * @param[out] txn Address where the new #MDB_txn handle will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_PANIC - a fatal error occurred earlier and the environment - * must be shut down. - *
  • #MDB_MAP_RESIZED - another process wrote data beyond this MDB_env's - * mapsize and this environment's map must be resized as well. - * See #mdb_env_set_mapsize(). - *
  • #MDB_READERS_FULL - a read-only transaction was requested and - * the reader lock table is full. See #mdb_env_set_maxreaders(). - *
  • ENOMEM - out of memory. - *
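[Editor's note: a sketch of beginning and committing a read-write transaction with mdb_txn_begin(), documented above; `env` is assumed to be an open environment, e.g. from the open_env() sketch earlier.]

    MDB_txn *txn;
    int rc = mdb_txn_begin(env, NULL /* no parent */, 0 /* read-write */, &txn);
    if (rc == 0) {
        /* ... mdb_dbi_open(), mdb_put(), mdb_get() calls go here ... */
        rc = mdb_txn_commit(txn);   /* frees the handle, success or not */
    }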
- */ -int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn); - - /** @brief Returns the transaction's #MDB_env - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -MDB_env *mdb_txn_env(MDB_txn *txn); - - /** @brief Return the transaction's ID. - * - * This returns the identifier associated with this transaction. For a - * read-only transaction, this corresponds to the snapshot being read; - * concurrent readers will frequently have the same transaction ID. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @return A transaction ID, valid if input is an active transaction. - */ -mdb_size_t mdb_txn_id(MDB_txn *txn); - - /** @brief Commit all the operations of a transaction into the database. - * - * The transaction handle is freed. It and its cursors must not be used - * again after this call, except with #mdb_cursor_renew(). - * @note Earlier documentation incorrectly said all cursors would be freed. - * Only write-transactions free cursors. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
  • ENOSPC - no more disk space. - *
  • EIO - a low-level I/O error occurred while writing. - *
  • ENOMEM - out of memory. - *
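[Editor's note: the usual commit-or-abort pattern for mdb_txn_commit()/mdb_txn_abort(), so the handle is released on every path; do_updates() stands in for application code and is hypothetical.]

    int rc = do_updates(txn);       /* hypothetical application function */
    if (rc == 0)
        rc = mdb_txn_commit(txn);   /* handle is freed either way */
    else
        mdb_txn_abort(txn);         /* discard the changes and the handle */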
- */ -int mdb_txn_commit(MDB_txn *txn); - - /** @brief Abandon all the operations of the transaction instead of saving them. - * - * The transaction handle is freed. It and its cursors must not be used - * again after this call, except with #mdb_cursor_renew(). - * @note Earlier documentation incorrectly said all cursors would be freed. - * Only write-transactions free cursors. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -void mdb_txn_abort(MDB_txn *txn); - - /** @brief Reset a read-only transaction. - * - * Abort the transaction like #mdb_txn_abort(), but keep the transaction - * handle. #mdb_txn_renew() may reuse the handle. This saves allocation - * overhead if the process will start a new read-only transaction soon, - * and also locking overhead if #MDB_NOTLS is in use. The reader table - * lock is released, but the table slot stays tied to its thread or - * #MDB_txn. Use mdb_txn_abort() to discard a reset handle, and to free - * its lock table slot if MDB_NOTLS is in use. - * Cursors opened within the transaction must not be used - * again after this call, except with #mdb_cursor_renew(). - * Reader locks generally don't interfere with writers, but they keep old - * versions of database pages allocated. Thus they prevent the old pages - * from being reused when writers commit new data, and so under heavy load - * the database size may grow much more rapidly than otherwise. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -void mdb_txn_reset(MDB_txn *txn); - - /** @brief Renew a read-only transaction. - * - * This acquires a new reader lock for a transaction handle that had been - * released by #mdb_txn_reset(). It must be called before a reset transaction - * may be used again. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_PANIC - a fatal error occurred earlier and the environment - * must be shut down. - *
  • EINVAL - an invalid parameter was specified. - *
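[Editor's note: a sketch of the mdb_txn_reset()/mdb_txn_renew() cycle for reusing one read-only handle across many short reads; the loop bound is illustrative.]

    MDB_txn *rtxn;
    int rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &rtxn);
    if (rc == 0) {
        for (int i = 0; i < 10 && rc == 0; i++) {
            /* ... read with mdb_get() or a cursor ... */
            mdb_txn_reset(rtxn);        /* drop the snapshot, keep the handle */
            rc = mdb_txn_renew(rtxn);   /* re-arm the handle for the next read */
        }
        mdb_txn_abort(rtxn);            /* finally discard the handle */
    }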
- */ -int mdb_txn_renew(MDB_txn *txn); - -/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */ -#define mdb_open(txn,name,flags,dbi) mdb_dbi_open(txn,name,flags,dbi) -/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */ -#define mdb_close(env,dbi) mdb_dbi_close(env,dbi) - - /** @brief Open a database in the environment. - * - * A database handle denotes the name and parameters of a database, - * independently of whether such a database exists. - * The database handle may be discarded by calling #mdb_dbi_close(). - * The old database handle is returned if the database was already open. - * The handle may only be closed once. - * - * The database handle will be private to the current transaction until - * the transaction is successfully committed. If the transaction is - * aborted the handle will be closed automatically. - * After a successful commit the handle will reside in the shared - * environment, and may be used by other transactions. - * - * This function must not be called from multiple concurrent - * transactions in the same process. A transaction that uses - * this function must finish (either commit or abort) before - * any other transaction in the process may use this function. - * - * To use named databases (with name != NULL), #mdb_env_set_maxdbs() - * must be called before opening the environment. Database names are - * keys in the unnamed database, and may be read but not written. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] name The name of the database to open. If only a single - * database is needed in the environment, this value may be NULL. - * @param[in] flags Special options for this database. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - *
    - *
  • #MDB_REVERSEKEY - * Keys are strings to be compared in reverse order, from the end - * of the strings to the beginning. By default, keys are treated as strings and - * compared from beginning to end. - *
  • #MDB_DUPSORT - * Duplicate keys may be used in the database. (Or, from another perspective, - * keys may have multiple data items, stored in sorted order.) By default - * keys must be unique and may have only a single data item. - *
  • #MDB_INTEGERKEY - * Keys are binary integers in native byte order, either unsigned int - * or #mdb_size_t, and will be sorted as such. - * (lmdb expects 32-bit int <= size_t <= 32/64-bit mdb_size_t.) - * The keys must all be of the same size. - *
  • #MDB_DUPFIXED - * This flag may only be used in combination with #MDB_DUPSORT. This option - * tells the library that the data items for this database are all the same - * size, which allows further optimizations in storage and retrieval. When - * all data items are the same size, the #MDB_GET_MULTIPLE, #MDB_NEXT_MULTIPLE - * and #MDB_PREV_MULTIPLE cursor operations may be used to retrieve multiple - * items at once. - *
  • #MDB_INTEGERDUP - * This option specifies that duplicate data items are binary integers, - * similar to #MDB_INTEGERKEY keys. - *
  • #MDB_REVERSEDUP - * This option specifies that duplicate data items should be compared as - * strings in reverse order. - *
  • #MDB_CREATE - * Create the named database if it doesn't exist. This option is not - * allowed in a read-only transaction or a read-only environment. - *
- * @param[out] dbi Address where the new #MDB_dbi handle will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_NOTFOUND - the specified database doesn't exist in the environment - * and #MDB_CREATE was not specified. - *
  • #MDB_DBS_FULL - too many databases have been opened. See #mdb_env_set_maxdbs(). - *
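[Editor's note: a sketch of opening a named database with mdb_dbi_open(); the name "blocks" is illustrative, and mdb_env_set_maxdbs() must have been called before the environment was opened.]

    MDB_dbi dbi;
    MDB_txn *txn;
    int rc = mdb_txn_begin(env, NULL, 0, &txn);
    if (rc == 0) {
        rc = mdb_dbi_open(txn, "blocks", MDB_CREATE, &dbi);
        if (rc == 0)
            rc = mdb_txn_commit(txn);   /* publishes the handle to other txns */
        else
            mdb_txn_abort(txn);
    }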
- */ -int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi); - - /** @brief Retrieve statistics for a database. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] stat The address of an #MDB_stat structure - * where the statistics will be copied - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat); - - /** @brief Retrieve the DB flags for a database handle. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] flags Address where the flags will be returned. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags); - - /** @brief Close a database handle. Normally unnecessary. Use with care: - * - * This call is not mutex protected. Handles should only be closed by - * a single thread, and only if no other threads are going to reference - * the database handle or one of its cursors any further. Do not close - * a handle if an existing transaction has modified its database. - * Doing so can cause misbehavior from database corruption to errors - * like MDB_BAD_VALSIZE (since the DB name is gone). - * - * Closing a database handle is not necessary, but lets #mdb_dbi_open() - * reuse the handle value. Usually it's better to set a bigger - * #mdb_env_set_maxdbs(), unless that value would be large. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - */ -void mdb_dbi_close(MDB_env *env, MDB_dbi dbi); - - /** @brief Empty or delete+close a database. - * - * See #mdb_dbi_close() for restrictions about closing the DB handle. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] del 0 to empty the DB, 1 to delete it from the - * environment and close the DB handle. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del); - - /** @brief Set a custom key comparison function for a database. - * - * The comparison function is called whenever it is necessary to compare a - * key specified by the application with a key currently stored in the database. - * If no comparison function is specified, and no special key flags were specified - * with #mdb_dbi_open(), the keys are compared lexically, with shorter keys collating - * before longer keys. - * @warning This function must be called before any data access functions are used, - * otherwise data corruption may occur. The same comparison function must be used by every - * program accessing the database, every time the database is used. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] cmp A #MDB_cmp_func function - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp); - - /** @brief Set a custom data comparison function for a #MDB_DUPSORT database. - * - * This comparison function is called whenever it is necessary to compare a data - * item specified by the application with a data item currently stored in the database. - * This function only takes effect if the database was opened with the #MDB_DUPSORT - * flag. - * If no comparison function is specified, and no special key flags were specified - * with #mdb_dbi_open(), the data items are compared lexically, with shorter items collating - * before longer items. - * @warning This function must be called before any data access functions are used, - * otherwise data corruption may occur. The same comparison function must be used by every - * program accessing the database, every time the database is used. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] cmp A #MDB_cmp_func function - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp); - - /** @brief Set a relocation function for a #MDB_FIXEDMAP database. - * - * @todo The relocation function is called whenever it is necessary to move the data - * of an item to a different position in the database (e.g. through tree - * balancing operations, shifts as a result of adds or deletes, etc.). It is - * intended to allow address/position-dependent data items to be stored in - * a database in an environment opened with the #MDB_FIXEDMAP option. - * Currently the relocation feature is unimplemented and setting - * this function has no effect. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] rel A #MDB_rel_func function - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel); - - /** @brief Set a context pointer for a #MDB_FIXEDMAP database's relocation function. - * - * See #mdb_set_relfunc and #MDB_rel_func for more details. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] ctx An arbitrary pointer for whatever the application needs. - * It will be passed to the callback function set by #mdb_set_relfunc - * as its \b relctx parameter whenever the callback is invoked. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx); - - /** @brief Get items from a database. - * - * This function retrieves key/data pairs from the database. The address - * and length of the data associated with the specified \b key are returned - * in the structure to which \b data refers. - * If the database supports duplicate keys (#MDB_DUPSORT) then the - * first data item for the key will be returned. Retrieval of other - * items requires the use of #mdb_cursor_get(). - * - * @note The memory pointed to by the returned values is owned by the - * database. The caller need not dispose of the memory, and may not - * modify it in any way. For values returned in a read-only transaction - * any modification attempts will cause a SIGSEGV. - * @note Values returned from the database are valid only until a - * subsequent update operation, or the end of the transaction. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to search for in the database - * @param[out] data The data corresponding to the key - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_NOTFOUND - the key was not in the database. - *
  • EINVAL - an invalid parameter was specified. - *
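[Editor's note: a lookup sketch for mdb_get(); `txn` and `dbi` are assumed from the sketches above. As documented, the returned memory belongs to LMDB and is valid only until the next update or the end of the transaction.]

    MDB_val key = { .mv_size = 5, .mv_data = "hello" };
    MDB_val data;
    int rc = mdb_get(txn, dbi, &key, &data);
    if (rc == 0)
        printf("%.*s\n", (int)data.mv_size, (char *)data.mv_data);
    else if (rc == MDB_NOTFOUND)
        puts("no such key");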
- */ -int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); - - /** @brief Store items into a database. - * - * This function stores key/data pairs in the database. The default behavior - * is to enter the new key/data pair, replacing any previously existing key - * if duplicates are disallowed, or adding a duplicate data item if - * duplicates are allowed (#MDB_DUPSORT). - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to store in the database - * @param[in,out] data The data to store - * @param[in] flags Special options for this operation. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - *
    - *
  • #MDB_NODUPDATA - enter the new key/data pair only if it does not - * already appear in the database. This flag may only be specified - * if the database was opened with #MDB_DUPSORT. The function will - * return #MDB_KEYEXIST if the key/data pair already appears in the - * database. - *
  • #MDB_NOOVERWRITE - enter the new key/data pair only if the key - * does not already appear in the database. The function will return - * #MDB_KEYEXIST if the key already appears in the database, even if - * the database supports duplicates (#MDB_DUPSORT). The \b data - * parameter will be set to point to the existing item. - *
  • #MDB_RESERVE - reserve space for data of the given size, but - * don't copy the given data. Instead, return a pointer to the - * reserved space, which the caller can fill in later - before - * the next update operation or the transaction ends. This saves - * an extra memcpy if the data is being generated later. - * LMDB does nothing else with this memory, the caller is expected - * to modify all of the space requested. This flag must not be - * specified if the database was opened with #MDB_DUPSORT. - *
  • #MDB_APPEND - append the given key/data pair to the end of the - * database. This option allows fast bulk loading when keys are - * already known to be in the correct order. Loading unsorted keys - * with this flag will cause a #MDB_KEYEXIST error. - *
  • #MDB_APPENDDUP - as above, but for sorted dup data. - *
- * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). - *
  • #MDB_TXN_FULL - the transaction has too many dirty pages. - *
  • EACCES - an attempt was made to write in a read-only transaction. - *
  • EINVAL - an invalid parameter was specified. - *
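[Editor's note: an insertion sketch for mdb_put() with #MDB_NOOVERWRITE, relying on the behavior documented above: on #MDB_KEYEXIST, \b data is repointed at the stored value.]

    MDB_val key  = { .mv_size = 5, .mv_data = "hello" };
    MDB_val data = { .mv_size = 5, .mv_data = "world" };
    int rc = mdb_put(txn, dbi, &key, &data, MDB_NOOVERWRITE);
    if (rc == MDB_KEYEXIST) {
        /* data now points at the value already stored under "hello" */
    }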
- */ -int mdb_put(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, - unsigned int flags); - - /** @brief Delete items from a database. - * - * This function removes key/data pairs from the database. - * If the database does not support sorted duplicate data items - * (#MDB_DUPSORT) the data parameter is ignored. - * If the database supports sorted duplicates and the data parameter - * is NULL, all of the duplicate data items for the key will be - * deleted. Otherwise, if the data parameter is non-NULL - * only the matching data item will be deleted. - * This function will return #MDB_NOTFOUND if the specified key/data - * pair is not in the database. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to delete from the database - * @param[in] data The data to delete - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EACCES - an attempt was made to write in a read-only transaction. - *
  • EINVAL - an invalid parameter was specified. - *
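[Editor's note: a deletion sketch for mdb_del(); with a non-#MDB_DUPSORT database the data argument is ignored, so NULL is passed.]

    MDB_val key = { .mv_size = 5, .mv_data = "hello" };
    int rc = mdb_del(txn, dbi, &key, NULL);
    if (rc == MDB_NOTFOUND) {
        /* the key was not present; treat as already deleted if desired */
    }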
- */ -int mdb_del(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); - - /** @brief Create a cursor handle. - * - * A cursor is associated with a specific transaction and database. - * A cursor cannot be used when its database handle is closed. Nor - * when its transaction has ended, except with #mdb_cursor_renew(). - * It can be discarded with #mdb_cursor_close(). - * A cursor in a write-transaction can be closed before its transaction - * ends, and will otherwise be closed when its transaction ends. - * A cursor in a read-only transaction must be closed explicitly, before - * or after its transaction ends. It can be reused with - * #mdb_cursor_renew() before finally closing it. - * @note Earlier documentation said that cursors in every transaction - * were closed when the transaction committed or aborted. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] cursor Address where the new #MDB_cursor handle will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor); - - /** @brief Close a cursor handle. - * - * The cursor handle will be freed and must not be used again after this call. - * Its transaction must still be live if it is a write-transaction. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -void mdb_cursor_close(MDB_cursor *cursor); - - /** @brief Renew a cursor handle. - * - * A cursor is associated with a specific transaction and database. - * Cursors that are only used in read-only - * transactions may be re-used, to avoid unnecessary malloc/free overhead. - * The cursor may be associated with a new read-only transaction, and - * referencing the same database handle as it was created with. - * This may be done whether the previous transaction is live or dead. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_cursor_renew(MDB_txn *txn, MDB_cursor *cursor); - - /** @brief Return the cursor's transaction handle. - * - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -MDB_txn *mdb_cursor_txn(MDB_cursor *cursor); - - /** @brief Return the cursor's database handle. - * - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -MDB_dbi mdb_cursor_dbi(MDB_cursor *cursor); - - /** @brief Retrieve by cursor. - * - * This function retrieves key/data pairs from the database. The address and length - * of the key are returned in the object to which \b key refers (except for the - * case of the #MDB_SET option, in which the \b key object is unchanged), and - * the address and length of the data are returned in the object to which \b data - * refers. - * See #mdb_get() for restrictions on using the output values. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in,out] key The key for a retrieved item - * @param[in,out] data The data of a retrieved item - * @param[in] op A cursor operation #MDB_cursor_op - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_NOTFOUND - no matching key found. - *
  • EINVAL - an invalid parameter was specified. - *
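[Editor's note: a full-scan sketch with mdb_cursor_get(); #MDB_NEXT on a freshly opened cursor positions it on the first item, and #MDB_NOTFOUND marks the normal end of iteration.]

    MDB_cursor *cur;
    int rc = mdb_cursor_open(txn, dbi, &cur);
    if (rc == 0) {
        MDB_val key, data;
        while ((rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT)) == 0)
            printf("%.*s\n", (int)key.mv_size, (char *)key.mv_data);
        if (rc == MDB_NOTFOUND)
            rc = 0;                 /* scanned everything */
        mdb_cursor_close(cur);
    }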
- */ -int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val *data, - MDB_cursor_op op); - - /** @brief Store by cursor. - * - * This function stores key/data pairs into the database. - * The cursor is positioned at the new item, or on failure usually near it. - * @note Earlier documentation incorrectly said errors would leave the - * state of the cursor unchanged. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in] key The key operated on. - * @param[in] data The data operated on. - * @param[in] flags Options for this operation. This parameter - * must be set to 0 or one of the values described here. - *
    - *
  • #MDB_CURRENT - replace the item at the current cursor position. - * The \b key parameter must still be provided, and must match it. - * If using sorted duplicates (#MDB_DUPSORT) the data item must still - * sort into the same place. This is intended to be used when the - * new data is the same size as the old. Otherwise it will simply - * perform a delete of the old record followed by an insert. - *
  • #MDB_NODUPDATA - enter the new key/data pair only if it does not - * already appear in the database. This flag may only be specified - * if the database was opened with #MDB_DUPSORT. The function will - * return #MDB_KEYEXIST if the key/data pair already appears in the - * database. - *
  • #MDB_NOOVERWRITE - enter the new key/data pair only if the key - * does not already appear in the database. The function will return - * #MDB_KEYEXIST if the key already appears in the database, even if - * the database supports duplicates (#MDB_DUPSORT). - *
  • #MDB_RESERVE - reserve space for data of the given size, but - * don't copy the given data. Instead, return a pointer to the - * reserved space, which the caller can fill in later - before - * the next update operation or the transaction ends. This saves - * an extra memcpy if the data is being generated later. This flag - * must not be specified if the database was opened with #MDB_DUPSORT. - *
  • #MDB_APPEND - append the given key/data pair to the end of the - * database. No key comparisons are performed. This option allows - * fast bulk loading when keys are already known to be in the - * correct order. Loading unsorted keys with this flag will cause - * a #MDB_KEYEXIST error. - *
  • #MDB_APPENDDUP - as above, but for sorted dup data. - *
  • #MDB_MULTIPLE - store multiple contiguous data elements in a - * single request. This flag may only be specified if the database - * was opened with #MDB_DUPFIXED. The \b data argument must be an - * array of two MDB_vals. The mv_size of the first MDB_val must be - * the size of a single data element. The mv_data of the first MDB_val - * must point to the beginning of the array of contiguous data elements. - * The mv_size of the second MDB_val must be the count of the number - * of data elements to store. On return this field will be set to - * the count of the number of elements actually written. The mv_data - * of the second MDB_val is unused. - *
- * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • #MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). - *
  • #MDB_TXN_FULL - the transaction has too many dirty pages. - *
  • EACCES - an attempt was made to write in a read-only transaction. - *
  • EINVAL - an invalid parameter was specified. - *
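[Editor's note: a bulk-load sketch with mdb_cursor_put() and #MDB_APPEND; the zero-padded keys are generated in sorted order, as the flag requires, and the count of 100 is illustrative. Assumes <stdio.h> for snprintf().]

    MDB_cursor *cur;
    int rc = mdb_cursor_open(txn, dbi, &cur);
    if (rc == 0) {
        for (int i = 0; rc == 0 && i < 100; i++) {
            char kbuf[16];
            int n = snprintf(kbuf, sizeof kbuf, "%08d", i);
            MDB_val key = { .mv_size = (size_t)n, .mv_data = kbuf };
            rc = mdb_cursor_put(cur, &key, &key /* key as data too */, MDB_APPEND);
        }
        mdb_cursor_close(cur);
    }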
- */ -int mdb_cursor_put(MDB_cursor *cursor, MDB_val *key, MDB_val *data, - unsigned int flags); - - /** @brief Delete current key/data pair - * - * This function deletes the key/data pair to which the cursor refers. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in] flags Options for this operation. This parameter - * must be set to 0 or one of the values described here. - *
    - *
  • #MDB_NODUPDATA - delete all of the data items for the current key. - * This flag may only be specified if the database was opened with #MDB_DUPSORT. - *
- * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EACCES - an attempt was made to write in a read-only transaction. - *
  • EINVAL - an invalid parameter was specified. - *
- */ -int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags); - - /** @brief Return count of duplicates for current key. - * - * This call is only valid on databases that support sorted duplicate - * data items #MDB_DUPSORT. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[out] countp Address where the count will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - *
    - *
  • EINVAL - cursor is not initialized, or an invalid parameter was specified. - *
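[Editor's note: a sketch for mdb_cursor_count() on an #MDB_DUPSORT database; the cursor must first be positioned on a key, done here with #MDB_SET, and `cur` is assumed to be open on a dup-sorted dbi.]

    MDB_val key = { .mv_size = 5, .mv_data = "hello" };
    MDB_val data;
    mdb_size_t n;
    int rc = mdb_cursor_get(cur, &key, &data, MDB_SET);
    if (rc == 0 && (rc = mdb_cursor_count(cur, &n)) == 0)
        printf("%zu duplicates of \"hello\"\n", (size_t)n);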
- */ -int mdb_cursor_count(MDB_cursor *cursor, mdb_size_t *countp); - - /** @brief Compare two data items according to a particular database. - * - * This returns a comparison as if the two data items were keys in the - * specified database. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] a The first item to compare - * @param[in] b The second item to compare - * @return < 0 if a < b, 0 if a == b, > 0 if a > b - */ -int mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); - - /** @brief Compare two data items according to a particular database. - * - * This returns a comparison as if the two items were data items of - * the specified database. The database must have the #MDB_DUPSORT flag. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] a The first item to compare - * @param[in] b The second item to compare - * @return < 0 if a < b, 0 if a == b, > 0 if a > b - */ -int mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); - - /** @brief A callback function used to print a message from the library. - * - * @param[in] msg The string to be printed. - * @param[in] ctx An arbitrary context pointer for the callback. - * @return < 0 on failure, >= 0 on success. - */ -typedef int (MDB_msg_func)(const char *msg, void *ctx); - - /** @brief Dump the entries in the reader lock table. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] func A #MDB_msg_func function - * @param[in] ctx Anything the message function needs - * @return < 0 on failure, >= 0 on success. - */ -int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx); - - /** @brief Check for stale entries in the reader lock table. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] dead Number of stale slots that were cleared - * @return 0 on success, non-zero on failure. - */ -int mdb_reader_check(MDB_env *env, int *dead); -/** @} */ - -#ifdef __cplusplus -} -#endif -/** @page tools LMDB Command Line Tools - The following describes the command line tools that are available for LMDB. - \li \ref mdb_copy_1 - \li \ref mdb_dump_1 - \li \ref mdb_load_1 - \li \ref mdb_stat_1 -*/ - -#endif /* _LMDB_H_ */ diff --git a/vendors/ocaml-lmdb/src/lmdb.ml b/vendors/ocaml-lmdb/src/lmdb.ml deleted file mode 100644 index 9cbca4f39961..000000000000 --- a/vendors/ocaml-lmdb/src/lmdb.ml +++ /dev/null @@ -1,648 +0,0 @@ -(*--------------------------------------------------------------------------- - Copyright (c) 2018 Vincent Bernardoff. All rights reserved. - Distributed under the ISC license, see terms at the end of the file. 
- ---------------------------------------------------------------------------*) - -module Option = struct - let map ~f = function - | None -> None - | Some v -> Some (f v) -end - -let finalize ~final ~f = - try - let res = f () in - final () ; - res - with exn -> - final () ; - raise exn - -open Rresult - -type error = - | NoSuchFileOrDir - | IOError - | EnvironmentLocked - | OutOfMemory - | PermissionDenied - | InvalidArgument - | NoSpaceLeftOnDevice - | KeyExist - | KeyNotFound - | PageNotFound - | Corrupted - | Panic - | VersionMismatch - | InvalidFile - | MapFull - | DbsFull - | ReadersFull - | TLSFull - | TxnFull - | CursorFull - | PageFull - | MapResized - | Incompatible - | BadRslot - | BadTxn - | BadValSize - | BadDbi - | TxnProblem - -let int_of_error = function - | NoSuchFileOrDir -> 2 - | IOError -> 5 - | EnvironmentLocked -> 11 - | OutOfMemory -> 12 - | PermissionDenied -> 13 - | InvalidArgument -> 22 - | NoSpaceLeftOnDevice -> 28 - | KeyExist -> -30799 - | KeyNotFound -> -30798 - | PageNotFound -> -30797 - | Corrupted -> -30796 - | Panic -> -30795 - | VersionMismatch -> -30794 - | InvalidFile -> -30793 - | MapFull -> -30792 - | DbsFull -> -30791 - | ReadersFull -> -30790 - | TLSFull -> -30789 - | TxnFull -> -30788 - | CursorFull -> -30787 - | PageFull -> -30786 - | MapResized -> -30785 - | Incompatible -> -30784 - | BadRslot -> -30783 - | BadTxn -> -30782 - | BadValSize -> -30781 - | BadDbi -> -30780 - | TxnProblem -> -30779 - -let error_of_int = function - | 2 -> NoSuchFileOrDir - | 5 -> IOError - | 11 -> EnvironmentLocked - | 12 -> OutOfMemory - | 13 -> PermissionDenied - | 22 -> InvalidArgument - | 28 -> NoSpaceLeftOnDevice - | -30799 -> KeyExist - | -30798 -> KeyNotFound - | -30797 -> PageNotFound - | -30796 -> Corrupted - | -30795 -> Panic - | -30794 -> VersionMismatch - | -30793 -> InvalidFile - | -30792 -> MapFull - | -30791 -> DbsFull - | -30790 -> ReadersFull - | -30789 -> TLSFull - | -30788 -> TxnFull - | -30787 -> CursorFull - | -30786 -> PageFull - | -30785 -> MapResized - | -30784 -> Incompatible - | -30783 -> BadRslot - | -30782 -> BadTxn - | -30781 -> BadValSize - | -30780 -> BadDbi - | -30779 -> TxnProblem - | i -> invalid_arg (Printf.sprintf "error_of_int: %d" i) - -type version = { - major : int ; - minor : int ; - patch : int ; -} - -external version : unit -> version = "stub_mdb_version" -external strerror : int -> string = "stub_mdb_strerror" - -let string_of_error error = - strerror (int_of_error error) - -let pp_error ppf err = - Format.fprintf ppf "%s" (string_of_error err) - -let to_msg t = R.error_to_msg ~pp_error t - -type t -external create : unit -> (t, int) result = "stub_mdb_env_create" - -type flag_env = - | FixedMap - | NoSubdir - | NoSync - | RdOnly - | NoMetaSync - | WriteMap - | MapAsync - | NoTLS - | NoLock - | NoRdAhead - | NoMemInit - | PrevMeta - -let int_of_flag_env = function - | FixedMap -> 0x01 - | NoSubdir -> 0x4000 - | NoSync -> 0x10_000 - | RdOnly -> 0x20_000 - | NoMetaSync -> 0x40_000 - | WriteMap -> 0x80_000 - | MapAsync -> 0x100_000 - | NoTLS -> 0x200_000 - | NoLock -> 0x400_000 - | NoRdAhead -> 0x800_000 - | NoMemInit -> 0x1_000_000 - | PrevMeta -> 0x2_000_000 - -let flags_env_of_int v = - List.fold_left begin fun acc flag -> - if v land (int_of_flag_env flag) <> 0 then flag :: acc else acc - end [] - [ FixedMap ; NoSubdir ; NoSync ; RdOnly ; NoMetaSync ; - WriteMap ; MapAsync ; NoTLS ; NoLock ; NoRdAhead ; - NoMemInit ; PrevMeta ] - -type flag_open = - | ReverseKey - | DupSort - | IntegerKey - | DupFixed - | IntegerDup - | 
ReverseDup - | Create - -let int_of_flag_open = function - | ReverseKey -> 0x02 - | DupSort -> 0x04 - | IntegerKey -> 0x08 - | DupFixed -> 0x10 - | IntegerDup -> 0x20 - | ReverseDup -> 0x40 - | Create -> 0x40_000 - -let flags_open_of_int v = - List.fold_left begin fun acc flag -> - if v land (int_of_flag_open flag) <> 0 then flag :: acc else acc - end [] - [ ReverseKey ; DupSort ; IntegerKey ; DupFixed ; IntegerDup ; - ReverseDup ; Create ] - -type flag_put = - | NoOverwrite - | NoDupData - | Current - | Reserve - | Append - | AppendDup - | Multiple - -let int_of_flag_put = function - | NoOverwrite -> 0x10 - | NoDupData -> 0x20 - | Current -> 0x40 - | Reserve -> 0x10_000 - | Append -> 0x20_000 - | AppendDup -> 0x40_000 - | Multiple -> 0x80_000 - -let fold_flags int_of_flag flags = - List.fold_left (fun a flag -> a lor (int_of_flag flag)) 0 flags - -let int_of_flags_env = fold_flags int_of_flag_env -let int_of_flags_open = fold_flags int_of_flag_open -let int_of_flags_put = fold_flags int_of_flag_put - -let return ?(on_error = fun () -> ()) ret v = - if ret = 0 then - Ok v - else begin - on_error () ; - Error (error_of_int ret) - end - -external set_maxreaders : t -> int -> int = "stub_mdb_env_set_maxreaders" [@@noalloc] - -let set_maxreaders t readers = - let ret = set_maxreaders t readers in - return ret () - -external set_maxdbs : t -> int -> int = "stub_mdb_env_set_maxdbs" [@@noalloc] - -let set_maxdbs t dbs = - let ret = set_maxdbs t dbs in - return ret () - -external set_mapsize : t -> int64 -> int = "stub_mdb_env_set_mapsize" [@@noalloc] - -let set_mapsize t size = - let ret = set_mapsize t size in - return ret () - -external opendir : - t -> string -> int -> Unix.file_perm -> int = "stub_mdb_env_open" [@@noalloc] - -external closedir : - t -> unit = "stub_mdb_env_close" [@@noalloc] - -let opendir ?maxreaders ?maxdbs ?mapsize ?(flags=[]) path mode = - match create () with - | Error v -> Error (error_of_int v) - | Ok t -> - begin match maxreaders with - | None -> Ok () - | Some readers -> set_maxreaders t readers - end >>= fun () -> - begin match maxdbs with - | None -> Ok () - | Some dbs -> set_maxdbs t dbs - end >>= fun () -> - begin match mapsize with - | None -> Ok () - | Some size -> set_mapsize t size - end >>= fun () -> - let ret = opendir t path (int_of_flags_env flags) mode in - return ret t ~on_error:(fun () -> closedir t) - -external copy : - t -> string -> int -> int = "stub_mdb_env_copy2" [@@noalloc] - -let copy ?(compact=false) t path = - let ret = copy t path (if compact then 0x01 else 0x00) in - return ret () - -external copyfd : - t -> Unix.file_descr -> int -> int = "stub_mdb_env_copyfd2" [@@noalloc] - -let copyfd ?(compact=false) t fd = - let ret = copyfd t fd (if compact then 0x01 else 0x00) in - return ret () - -type stat = { - psize : int ; - depth : int ; - branch_pages : int ; - leaf_pages : int ; - overflow_pages : int ; - entries : int ; -} - -external stat : t -> stat = "stub_mdb_env_stat" - -type envinfo = { - mapsize : int ; - last_pgno : int ; - last_txnid : int ; - maxreaders : int ; - numreaders : int ; -} - -external envinfo : t -> envinfo = "stub_mdb_env_info" - -external sync : t -> bool -> int = "stub_mdb_env_sync" [@@noalloc] - -let sync ?(force=false) t = - let ret = sync t force in - return ret () - -external setclear_flags : - t -> int -> bool -> int = "stub_mdb_env_set_flags" [@@noalloc] - -let set_flags t flags = - let ret = setclear_flags t (int_of_flags_env flags) true in - return ret () - -let clear_flags t flags = - let ret = 
setclear_flags t (int_of_flags_env flags) false in - return ret () - -external get_flags : t -> int = "stub_mdb_env_get_flags" [@@noalloc] - -let get_flags t = - flags_env_of_int (get_flags t) - -external get_path : t -> string = "stub_mdb_env_get_path" -external get_fd : t -> Unix.file_descr = "stub_mdb_env_get_fd" [@@noalloc] -external get_maxreaders : t -> int = "stub_mdb_env_get_maxreaders" [@@noalloc] -external get_maxkeysize : t -> int = "stub_mdb_env_get_maxkeysize" [@@noalloc] - -type rawtxn -type ro -type rw -type _ txn = - | Txn_ro : rawtxn -> ro txn - | Txn_rw : rawtxn -> rw txn - -let rawtxn_of_txn : type a. a txn -> rawtxn = function - | Txn_ro rawtxn -> rawtxn - | Txn_rw rawtxn -> rawtxn - -external txn_begin : - t -> int -> rawtxn option -> (rawtxn, int) result = "stub_mdb_txn_begin" - -let create_rw_txn ?(nosync=false) ?(nometasync=false) ?parent t = - let flags = match nosync, nometasync with - | true, true -> int_of_flags_env [NoSync; NoMetaSync] - | true, false -> int_of_flag_env NoSync - | false, true -> int_of_flag_env NoMetaSync - | _ -> 0 in - match txn_begin t flags (Option.map ~f:rawtxn_of_txn parent) with - | Error i -> Error (error_of_int i) - | Ok tx -> Ok (Txn_rw tx) - -let create_ro_txn ?(nosync=false) ?(nometasync=false) ?parent t = - let flags = match nosync, nometasync with - | true, true -> int_of_flags_env [RdOnly; NoSync; NoMetaSync] - | true, false -> int_of_flags_env [RdOnly; NoSync] - | false, true -> int_of_flags_env [RdOnly; NoMetaSync] - | _ -> int_of_flag_env RdOnly in - match txn_begin t flags (Option.map ~f:rawtxn_of_txn parent) with - | Error i -> Error (error_of_int i) - | Ok tx -> Ok (Txn_ro tx) - -external get_txn_id : rawtxn -> int = "stub_mdb_txn_id" [@@noalloc] -external get_txn_env : rawtxn -> t = "stub_mdb_txn_env" - -let get_txn_id txn = - get_txn_id (rawtxn_of_txn txn) - -let get_txn_env txn = - get_txn_env (rawtxn_of_txn txn) - -external commit_txn : rawtxn -> int = "stub_mdb_txn_commit" [@@noalloc] -external abort_txn : rawtxn -> unit = "stub_mdb_txn_abort" [@@noalloc] - -let commit_txn txn = - return (commit_txn (rawtxn_of_txn txn)) () - -let abort_txn txn = - abort_txn (rawtxn_of_txn txn) - -external reset_ro_txn : rawtxn -> unit = "stub_mdb_txn_reset" [@@noalloc] -external renew_ro_txn : rawtxn -> int = "stub_mdb_txn_renew" [@@noalloc] - -let reset_ro_txn (Txn_ro rawtxn) = - reset_ro_txn rawtxn - -let renew_ro_txn (Txn_ro rawtxn) = - return (renew_ro_txn rawtxn) () - -type db = nativeint - -external opendb : - rawtxn -> string option -> int -> (db, int) result = "stub_mdb_dbi_open" - -let opendb ?(flags=[]) ?name txn = - R.reword_error error_of_int - (opendb (rawtxn_of_txn txn) name (int_of_flags_open flags)) - -external db_stat : - rawtxn -> db -> (stat, int) result = "stub_mdb_stat" - -let db_stat txn dbi = - R.reword_error error_of_int (db_stat (rawtxn_of_txn txn) dbi) - -external db_flags : - rawtxn -> db -> (int, int) result = "stub_mdb_dbi_flags" - -let db_flags txn dbi = - match db_flags (rawtxn_of_txn txn) dbi with - | Error i -> Error (error_of_int i) - | Ok v -> Ok (flags_open_of_int v) - -external db_drop : - rawtxn -> db -> bool -> int = "stub_mdb_drop" [@@noalloc] - -let db_drop txn dbi = - return (db_drop (rawtxn_of_txn txn) dbi false) () - -let with_ro_db ?nosync ?nometasync ?parent ?flags ?name t ~f = - create_ro_txn ?nosync ?nometasync ?parent t >>= fun txn -> - opendb ?flags ?name txn >>= fun db -> - match f txn db with - | exception exn -> - abort_txn txn ; - raise exn - | Ok res -> - commit_txn txn >>= fun () 
-> - Ok res - | Error err -> - abort_txn txn ; - Error err - -let with_rw_db ?nosync ?nometasync ?parent ?flags ?name t ~f = - create_rw_txn ?nosync ?nometasync ?parent t >>= fun txn -> - opendb ?flags ?name txn >>= fun db -> - match f txn db with - | exception exn -> - abort_txn txn ; - raise exn - | Ok res -> - commit_txn txn >>= fun () -> - Ok res - | Error err -> - abort_txn txn ; - Error err - -type buffer = (char, Bigarray.int8_unsigned_elt, Bigarray.c_layout) Bigarray.Array1.t - -external get : - rawtxn -> db -> string -> (buffer, int) result = "stub_mdb_get" - -let get txn dbi k = - R.reword_error error_of_int (get (rawtxn_of_txn txn) dbi k) - -let mem txn dbi k = - match get txn dbi k with - | Ok _ -> Ok true - | Error KeyNotFound -> Ok false - | Error err -> Error err - -external put : - rawtxn -> db -> string -> buffer -> int -> int = "stub_mdb_put" [@@noalloc] -external put_string : - rawtxn -> db -> string -> string -> int -> int = "stub_mdb_put_string" [@@noalloc] - -let put ?(flags=[]) txn dbi k v = - let flags = int_of_flags_put flags in - return (put (rawtxn_of_txn txn) dbi k v flags) () - -let put_string ?(flags=[]) txn dbi k v = - let flags = int_of_flags_put flags in - return (put_string (rawtxn_of_txn txn) dbi k v flags) () - -external del : - rawtxn -> db -> string -> buffer option -> int = "stub_mdb_del" [@@noalloc] -external del_string : - rawtxn -> db -> string -> string option -> int = "stub_mdb_del_string" [@@noalloc] - -let del ?data txn dbi k = - return (del (rawtxn_of_txn txn) dbi k data) () - -let del_string ?data txn dbi k = - return (del_string (rawtxn_of_txn txn) dbi k data) () - -type rawcursor -type _ cursor = - | Cursor_ro : rawcursor -> ro cursor - | Cursor_rw : rawcursor -> rw cursor - -let rawcursor_of_cursor : type a. a cursor -> rawcursor = function - | Cursor_ro rawcursor -> rawcursor - | Cursor_rw rawcursor -> rawcursor - -let cursor_ro rawcursor = Cursor_ro rawcursor -let cursor_rw rawcursor = Cursor_rw rawcursor - -external opencursor : - rawtxn -> db -> (rawcursor, int) result = "stub_mdb_cursor_open" - -let opencursor : - type a. a txn -> db -> (a cursor, error) result = fun txn dbi -> - match txn with - | Txn_ro rawtxn -> - R.reword_error error_of_int (opencursor rawtxn dbi) |> - R.map cursor_ro - | Txn_rw rawtxn -> - R.reword_error error_of_int (opencursor rawtxn dbi) |> - R.map cursor_rw - -external cursor_close : - rawcursor -> unit = "stub_mdb_cursor_close" [@@noalloc] - -external cursor_renew : - rawtxn -> rawcursor -> int = "stub_mdb_cursor_renew" [@@noalloc] - -let cursor_close cursor = - cursor_close (rawcursor_of_cursor cursor) - -let cursor_renew (Txn_ro rawtxn) (Cursor_ro rawcursor) = - return (cursor_renew rawtxn rawcursor) () - -external cursor_txn : - rawcursor -> rawtxn = "stub_mdb_cursor_txn" - -let cursor_txn : type a. 
a cursor -> a txn = function - | Cursor_ro rawcursor -> Txn_ro (cursor_txn rawcursor) - | Cursor_rw rawcursor -> Txn_rw (cursor_txn rawcursor) - -external cursor_db : - rawcursor -> db = "stub_mdb_cursor_dbi" [@@noalloc] - -let cursor_db cursor = - cursor_db (rawcursor_of_cursor cursor) - -type cursor_op = - | First - | First_dup - | Get_both - | Get_both_range - | Get_current - | Get_multiple - | Last - | Last_dup - | Next - | Next_dup - | Next_multiple - | Next_nodup - | Prev - | Prev_dup - | Prev_nodup - | Set - | Set_key - | Set_range - | Prev_multiple - -external cursor_get_op : - rawcursor -> string option -> buffer option -> cursor_op -> - (buffer * buffer, int) result = "stub_mdb_cursor_get" - -let cursor_get_op ?key ?data cursor op = - R.reword_error error_of_int - (cursor_get_op (rawcursor_of_cursor cursor) key data op) - -let cursor_first cursor = - R.map ignore (cursor_get_op cursor First) -let cursor_last cursor = - R.map ignore (cursor_get_op cursor Last) -let cursor_next cursor = - R.map ignore (cursor_get_op cursor Next) -let cursor_prev cursor = - R.map ignore (cursor_get_op cursor Prev) -let cursor_at cursor = function - | "" -> cursor_first cursor - | key -> R.map ignore (cursor_get_op ~key cursor Set_range) - -let cursor_get cursor = - cursor_get_op cursor Get_current - -let cursor_fold_left ~f ~init cursor = - let rec inner a = - match cursor_get cursor with - | Error KeyNotFound -> Ok a - | Error err -> Error err - | Ok kv -> - f a kv >>= fun a -> - match cursor_next cursor with - | Error KeyNotFound -> Ok a - | Error err -> Error err - | Ok () -> inner a - in - inner init - -let cursor_iter ~f cursor = - cursor_fold_left ~init:() ~f:(fun () kv -> f kv) cursor - -external cursor_put : - rawcursor -> string -> buffer -> int -> int = "stub_mdb_cursor_put" [@@noalloc] -external cursor_put_string : - rawcursor -> string -> string -> int -> int = "stub_mdb_cursor_put_string" [@@noalloc] -external cursor_del : - rawcursor -> int -> int = "stub_mdb_cursor_del" [@@noalloc] -external cursor_count : - rawcursor -> (int, int) result = "stub_mdb_cursor_count" - -let cursor_put ?(flags=[]) cursor k v = - return - (cursor_put (rawcursor_of_cursor cursor) k v (int_of_flags_put flags)) - () - -let cursor_put_string ?(flags=[]) cursor k v = - return - (cursor_put_string (rawcursor_of_cursor cursor) k v (int_of_flags_put flags)) - () - -let cursor_del ?(flags=[]) cursor = - return - (cursor_del (rawcursor_of_cursor cursor) (int_of_flags_put flags)) - () - -let cursor_count cursor = - R.reword_error error_of_int - (cursor_count (rawcursor_of_cursor cursor)) - -let with_cursor txn db ~f = - opencursor txn db >>= fun cursor -> - finalize - ~final:(fun () -> cursor_close cursor) - ~f:(fun () -> f cursor) - -(*--------------------------------------------------------------------------- - Copyright (c) 2018 Vincent Bernardoff - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ---------------------------------------------------------------------------*) diff --git a/vendors/ocaml-lmdb/src/lmdb.mli b/vendors/ocaml-lmdb/src/lmdb.mli deleted file mode 100644 index 92e74fb81388..000000000000 --- a/vendors/ocaml-lmdb/src/lmdb.mli +++ /dev/null @@ -1,255 +0,0 @@ -(*--------------------------------------------------------------------------- - Copyright (c) 2018 Vincent Bernardoff. All rights reserved. - Distributed under the ISC license, see terms at the end of the file. - ---------------------------------------------------------------------------*) - -open Rresult - -type error = - | NoSuchFileOrDir - | IOError - | EnvironmentLocked - | OutOfMemory - | PermissionDenied - | InvalidArgument - | NoSpaceLeftOnDevice - | KeyExist - | KeyNotFound - | PageNotFound - | Corrupted - | Panic - | VersionMismatch - | InvalidFile - | MapFull - | DbsFull - | ReadersFull - | TLSFull - | TxnFull - | CursorFull - | PageFull - | MapResized - | Incompatible - | BadRslot - | BadTxn - | BadValSize - | BadDbi - | TxnProblem - -val string_of_error : error -> string -val pp_error : Format.formatter -> error -> unit -val to_msg : ('a, error) result -> ('a, [> R.msg]) result - -type version = { - major : int ; - minor : int ; - patch : int ; -} - -val version : unit -> version - -type ro -type rw -type t - -type flag_env = - | FixedMap - | NoSubdir - | NoSync - | RdOnly - | NoMetaSync - | WriteMap - | MapAsync - | NoTLS - | NoLock - | NoRdAhead - | NoMemInit - | PrevMeta - -val opendir : - ?maxreaders:int -> ?maxdbs:int -> ?mapsize:int64 -> ?flags:flag_env list -> - string -> Unix.file_perm -> (t, error) result - -val closedir : t -> unit - -val copy : ?compact:bool -> t -> string -> (unit, error) result -val copyfd : ?compact:bool -> t -> Unix.file_descr -> (unit, error) result - -type stat = { - psize : int ; - depth : int ; - branch_pages : int ; - leaf_pages : int ; - overflow_pages : int ; - entries : int ; -} - -val stat : t -> stat - -type envinfo = { - mapsize : int ; - last_pgno : int ; - last_txnid : int ; - maxreaders : int ; - numreaders : int ; -} - -val envinfo : t -> envinfo - -val sync : ?force:bool -> t -> (unit, error) result - -val get_flags : t -> flag_env list -val set_flags : t -> flag_env list -> (unit, error) result -val clear_flags : t -> flag_env list -> (unit, error) result - -val get_path : t -> string -val get_fd : t -> Unix.file_descr - -val get_maxreaders : t -> int -val get_maxkeysize : t -> int - -val set_mapsize : t -> int64 -> (unit, error) result - -type _ txn - -val create_rw_txn : - ?nosync:bool -> ?nometasync:bool -> - ?parent:rw txn -> t -> (rw txn, error) result - -val create_ro_txn : - ?nosync:bool -> ?nometasync:bool -> - ?parent:_ txn -> t -> (ro txn, error) result - -val get_txn_id : _ txn -> int -val get_txn_env : _ txn -> t - -val commit_txn : _ txn -> (unit, error) result -val abort_txn : _ txn -> unit - -val reset_ro_txn : ro txn -> unit -val renew_ro_txn : ro txn -> (unit, error) result - -type flag_open = - | ReverseKey - | DupSort - | IntegerKey - | DupFixed - | IntegerDup - | ReverseDup - | Create - -type db - -val opendb : - ?flags:flag_open list -> ?name:string -> _ txn -> (db, error) result - -val db_stat : _ txn -> 
db -> (stat, error) result -val db_flags : _ txn -> db -> (flag_open list, error) result -val db_drop : _ txn -> db -> (unit, error) result - -val with_ro_db : - ?nosync:bool -> ?nometasync:bool -> - ?parent:_ txn -> ?flags:flag_open list -> - ?name:string -> t -> f:(ro txn -> db -> ('a, error) result) -> - ('a, error) result - -val with_rw_db : - ?nosync:bool -> ?nometasync:bool -> - ?parent:rw txn -> ?flags:flag_open list -> - ?name:string -> t -> f:(rw txn -> db -> ('a, error) result) -> - ('a, error) result - -type buffer = (char, Bigarray.int8_unsigned_elt, Bigarray.c_layout) Bigarray.Array1.t - -val get : _ txn -> db -> string -> (buffer, error) result -val mem : _ txn -> db -> string -> (bool, error) result - -type flag_put = - | NoOverwrite - | NoDupData - | Current - | Reserve - | Append - | AppendDup - | Multiple - -val put : ?flags:flag_put list -> - rw txn -> db -> string -> buffer -> (unit, error) result -val put_string : ?flags:flag_put list -> - rw txn -> db -> string -> string -> (unit, error) result - -val del : ?data:buffer -> - rw txn -> db -> string -> (unit, error) result -val del_string : ?data:string -> - rw txn -> db -> string -> (unit, error) result - -type _ cursor - -val opencursor : 'a txn -> db -> ('a cursor, error) result -val cursor_close : _ cursor -> unit -val cursor_renew : ro txn -> ro cursor -> (unit, error) result - -val cursor_txn : 'a cursor -> 'a txn -val cursor_db : _ cursor -> db - -val cursor_first : _ cursor -> (unit, error) result -val cursor_last : _ cursor -> (unit, error) result -val cursor_prev : _ cursor -> (unit, error) result -val cursor_next : _ cursor -> (unit, error) result -val cursor_at : _ cursor -> string -> (unit, error) result - -val cursor_get : _ cursor -> (buffer * buffer, error) result - -val cursor_fold_left : - f:('a -> (buffer * buffer) -> ('a, error) result) -> - init:'a -> _ cursor -> ('a, error) result - -val cursor_iter : - f:(buffer * buffer -> (unit, error) result) -> _ cursor -> (unit, error) result - -val with_cursor : - 'a txn -> db -> f:('a cursor -> ('b, error) result) -> - ('b, error) result - -type cursor_op = - | First - | First_dup - | Get_both - | Get_both_range - | Get_current - | Get_multiple - | Last - | Last_dup - | Next - | Next_dup - | Next_multiple - | Next_nodup - | Prev - | Prev_dup - | Prev_nodup - | Set - | Set_key - | Set_range - | Prev_multiple - -val cursor_put : ?flags:flag_put list -> - rw cursor -> string -> buffer -> (unit, error) result -val cursor_put_string : ?flags:flag_put list -> - rw cursor -> string -> string -> (unit, error) result -val cursor_del : ?flags:flag_put list -> rw cursor -> (unit, error) result -val cursor_count : _ cursor -> (int, error) result - -(*--------------------------------------------------------------------------- - Copyright (c) 2018 Vincent Bernardoff - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ---------------------------------------------------------------------------*) diff --git a/vendors/ocaml-lmdb/src/lmdb_stubs.c b/vendors/ocaml-lmdb/src/lmdb_stubs.c deleted file mode 100644 index fbd84adc2e98..000000000000 --- a/vendors/ocaml-lmdb/src/lmdb_stubs.c +++ /dev/null @@ -1,569 +0,0 @@ -/* -------------------------------------------------------------------------- - Copyright (c) 2018 Vincent Bernardoff. All rights reserved. - Distributed under the ISC license, see terms at the end of the file. - --------------------------------------------------------------------------- */ - -#include - -#include -#include -#include -#include -#include - -#include "lmdb.h" - -CAMLprim value stub_mdb_version(value unit) { - CAMLparam1(unit); - CAMLlocal1(result); - - int major, minor, patch; - mdb_version(&major, &minor, &patch); - result = caml_alloc_tuple(3); - Store_field(result, 0, Val_int(major)); - Store_field(result, 1, Val_int(minor)); - Store_field(result, 2, Val_int(patch)); - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_strerror(value errno) { - CAMLparam1(errno); - CAMLlocal1(result); - - char *errstr; - errstr = mdb_strerror(Int_val(errno)); - result = caml_copy_string(errstr); - - CAMLreturn(result); -} - -#define Env_val(v) (*((MDB_env **) Data_custom_val(v))) -#define Txn_val(v) (*((MDB_txn **) Data_custom_val(v))) -#define Cursor_val(v) (*((MDB_cursor **) Data_custom_val(v))) - -#define Gen_custom_block(SNAME, CNAME, MNAME) \ - static int compare_##SNAME(value a, value b) { \ - CNAME *aa = MNAME(a), *bb = MNAME(b); \ - return (aa == bb ? 0 : (aa < bb ? 
-1 : 1)); \ - } \ - \ - static struct custom_operations lmdb_##SNAME##_ops = { \ - .identifier = "lmdb_" #SNAME, \ - .finalize = custom_finalize_default, \ - .compare = compare_##SNAME, \ - .compare_ext = custom_compare_ext_default, \ - .hash = custom_hash_default, \ - .serialize = custom_serialize_default, \ - .deserialize = custom_deserialize_default \ - }; \ - \ - static value alloc_##SNAME (CNAME *a) { \ - value custom = alloc_custom(&lmdb_##SNAME##_ops, sizeof(CNAME *), 0, 1); \ - MNAME(custom) = a; \ - return custom; \ - } - -Gen_custom_block(env, MDB_env, Env_val) -Gen_custom_block(txn, MDB_txn, Txn_val) -Gen_custom_block(cursor, MDB_cursor, Cursor_val) - -CAMLprim value stub_mdb_env_create(value unit) { - CAMLparam1(unit); - CAMLlocal2(result, ml_env); - - int ret; - MDB_env *env; - - ret = mdb_env_create(&env); - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - ml_env = alloc_env(env); - Store_field(result, 0, ml_env); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_env_open(value env, value path, value flags, value mode) { - return Val_int(mdb_env_open(Env_val(env), String_val(path), Int_val(flags), Int_val(mode))); -} - -CAMLprim value stub_mdb_env_close(value env) { - mdb_env_close(Env_val(env)); - return Val_unit; -} - -CAMLprim value stub_mdb_env_copy2(value env, value path, value flags) { - return Val_int(mdb_env_copy2(Env_val(env), String_val(path), Int_val(flags))); -} - -CAMLprim value stub_mdb_env_copyfd2(value env, value fd, value flags) { - return Val_int(mdb_env_copyfd2(Env_val(env), Int_val(fd), Int_val(flags))); -} - -static void caml_mdb_stat(value result, const MDB_stat *stat) { - Store_field(result, 0, Val_int(stat->ms_psize)); - Store_field(result, 1, Val_int(stat->ms_depth)); - Store_field(result, 2, Val_long(stat->ms_branch_pages)); - Store_field(result, 3, Val_long(stat->ms_leaf_pages)); - Store_field(result, 4, Val_long(stat->ms_overflow_pages)); - Store_field(result, 5, Val_long(stat->ms_entries)); -} - -CAMLprim value stub_mdb_env_stat(value env) { - CAMLparam1(env); - CAMLlocal1(result); - - MDB_stat stat; - mdb_env_stat(Env_val(env), &stat); - result = caml_alloc_tuple(6); - caml_mdb_stat(result, &stat); - CAMLreturn(result); -} - -CAMLprim value stub_mdb_env_info(value env) { - CAMLparam1(env); - CAMLlocal1(result); - - MDB_envinfo info; - mdb_env_info(Env_val(env), &info); - result = caml_alloc_tuple(5); - - Store_field(result, 0, Val_long(info.me_mapsize)); - Store_field(result, 1, Val_long(info.me_last_pgno)); - Store_field(result, 2, Val_long(info.me_last_txnid)); - Store_field(result, 3, Val_int(info.me_maxreaders)); - Store_field(result, 4, Val_int(info.me_numreaders)); - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_env_sync(value env, value force) { - return Val_int(mdb_env_sync(Env_val(env), Bool_val(force))); -} - -CAMLprim value stub_mdb_env_set_flags(value env, value flags, value onoff) { - return Val_int(mdb_env_set_flags(Env_val(env), Int_val(flags), Bool_val(onoff))); -} - -CAMLprim value stub_mdb_env_get_flags(value env) { - unsigned int flags; - mdb_env_get_flags(Env_val(env), &flags); - return Val_int(flags); -} - -CAMLprim value stub_mdb_env_get_path(value env) { - CAMLparam1(env); - CAMLlocal1(result); - - const char *path; - mdb_env_get_path(Env_val(env), &path); - result = caml_copy_string(path); - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_env_get_fd(value env) { - mdb_filehandle_t fd; - mdb_env_get_fd(Env_val(env), &fd); - return 
Val_int(fd); -} - -CAMLprim value stub_mdb_env_set_mapsize(value env, value size) { - return Val_int(mdb_env_set_mapsize(Env_val(env), Int64_val(size))); -} - -CAMLprim value stub_mdb_env_set_maxreaders(value env, value readers) { - return Val_int(mdb_env_set_maxreaders(Env_val(env), Int_val(readers))); -} - -CAMLprim value stub_mdb_env_get_maxreaders(value env) { - unsigned int readers; - mdb_env_get_maxreaders(Env_val(env), &readers); - return Val_int(readers); -} - -CAMLprim value stub_mdb_env_set_maxdbs(value env, value dbs) { - return Val_int(mdb_env_set_maxdbs(Env_val(env), Int_val(dbs))); -} - -CAMLprim value stub_mdb_env_get_maxkeysize(value env) { - return Val_int(mdb_env_get_maxkeysize(Env_val(env))); -} - -CAMLprim value stub_mdb_txn_begin(value env, value flags, value parent) { - CAMLparam3(env, flags, parent); - CAMLlocal2(result, ml_txn); - - int ret; - MDB_txn *parent_txn = Is_block(parent) ? Txn_val(Field(parent, 0)) : NULL; - MDB_txn *new_txn; - - ret = mdb_txn_begin(Env_val(env), parent_txn, Int_val(flags), &new_txn); - - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - ml_txn = alloc_txn(new_txn); - Store_field(result, 0, ml_txn); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_txn_env(value txn) { - CAMLparam1(txn); - CAMLlocal1(result); - MDB_env *env = mdb_txn_env(Txn_val(txn)); - result = alloc_env(env); - CAMLreturn(result); -} - -CAMLprim value stub_mdb_txn_id(value txn) { - return Val_long(mdb_txn_id(Txn_val(txn))); -} - -CAMLprim value stub_mdb_txn_commit(value txn) { - return Val_int(mdb_txn_commit(Txn_val(txn))); -} - -CAMLprim value stub_mdb_txn_abort(value txn) { - mdb_txn_abort(Txn_val(txn)); - return Val_unit; -} - -CAMLprim value stub_mdb_txn_reset(value txn) { - mdb_txn_reset(Txn_val(txn)); - return Val_unit; -} - -CAMLprim value stub_mdb_txn_renew(value txn) { - return Val_int(mdb_txn_renew(Txn_val(txn))); -} - -CAMLprim value stub_mdb_dbi_open(value txn, value name, value flags) { - CAMLparam3(txn, name, flags); - CAMLlocal2(result, ml_dbi); - - MDB_dbi dbi; - int ret; - const char* db_name = NULL; - - if (Is_block(name)) db_name = String_val(Field(name, 0)); - - ret = mdb_dbi_open(Txn_val(txn), db_name, Int_val(flags), &dbi); - - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - ml_dbi = caml_copy_nativeint(dbi); - Store_field(result, 0, ml_dbi); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_stat(value txn, value dbi) { - CAMLparam2(txn, dbi); - CAMLlocal2(result, tuple); - - MDB_stat stat; - int ret; - ret = mdb_stat(Txn_val(txn), Nativeint_val(dbi), &stat); - - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - tuple = caml_alloc_tuple(6); - caml_mdb_stat(tuple, &stat); - Store_field(result, 0, tuple); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_dbi_flags(value txn, value dbi) { - CAMLparam2(txn, dbi); - CAMLlocal1(result); - - unsigned int flags; - int ret; - ret = mdb_dbi_flags(Txn_val(txn), Nativeint_val(dbi), &flags); - - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - Store_field(result, 0, Val_int(flags)); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_dbi_close(value env, value dbi) { - mdb_dbi_close(Env_val(env), Nativeint_val(dbi)); - return Val_unit; -} - -CAMLprim value stub_mdb_drop(value txn, value 
dbi, value del) { - return Val_int(mdb_drop(Txn_val(txn), Nativeint_val(dbi), Bool_val(del))); -} - -static inline value alloc_mdb_val_ba (MDB_val *v) { - return - (v ? - caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT, 1, v->mv_data, v->mv_size) : - caml_ba_alloc_dims(CAML_BA_UINT8 | CAML_BA_C_LAYOUT, 1, NULL, 0)); -} - -CAMLprim value stub_mdb_get(value txn, value dbi, value key) { - CAMLparam3(txn, dbi, key); - CAMLlocal1(result); - - MDB_val k, v; - int ret; - - k.mv_size = caml_string_length(key); - k.mv_data = Bytes_val(key); - - ret = mdb_get(Txn_val(txn), Nativeint_val(dbi), &k, &v); - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - Store_field(result, 0, alloc_mdb_val_ba(&v)); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_put(value txn, value dbi, - value key, value data, value flags) { - MDB_val k, v; - k.mv_size = caml_string_length(key); - k.mv_data = Bytes_val(key); - v.mv_size = Caml_ba_array_val(data)->dim[0]; - v.mv_data = Caml_ba_data_val(data); - return Val_int(mdb_put(Txn_val(txn), Nativeint_val(dbi), &k, &v, Int_val(flags))); -} - -CAMLprim value stub_mdb_put_string(value txn, value dbi, - value key, value data, value flags) { - MDB_val k, v; - k.mv_size = caml_string_length(key); - k.mv_data = Bytes_val(key); - v.mv_size = caml_string_length(data); - v.mv_data = Bytes_val(data); - return Val_int(mdb_put(Txn_val(txn), Nativeint_val(dbi), &k, &v, Int_val(flags))); -} - -CAMLprim value stub_mdb_del(value txn, value dbi, value key, value data) { - MDB_val k, v, *vp = NULL; - k.mv_size = caml_string_length(key); - k.mv_data = Bytes_val(key); - - if (Is_block(data)) { - v.mv_size = Caml_ba_array_val(Field(data, 0))->dim[0]; - v.mv_data = Caml_ba_data_val(Field(data, 0)); - vp = &v; - } - - return Val_int(mdb_del(Txn_val(txn), Nativeint_val(dbi), &k, vp)); -} - -CAMLprim value stub_mdb_del_string(value txn, value dbi, value key, value data) { - MDB_val k, v, *vp = NULL; - k.mv_size = caml_string_length(key); - k.mv_data = Bytes_val(key); - - if (Is_block(data)) { - v.mv_size = caml_string_length(Field(data, 0)); - v.mv_data = Bytes_val(Field(data, 0)); - vp = &v; - } - - return Val_int(mdb_del(Txn_val(txn), Nativeint_val(dbi), &k, vp)); -} - -CAMLprim value stub_mdb_cursor_open(value txn, value dbi) { - CAMLparam2(txn, dbi); - CAMLlocal2(result, ml_cursor); - - MDB_cursor *cursor; - int ret; - ret = mdb_cursor_open(Txn_val(txn), Nativeint_val(dbi), &cursor); - - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - ml_cursor = alloc_cursor(cursor); - Store_field(result, 0, ml_cursor); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_cursor_close(value cursor) { - mdb_cursor_close(Cursor_val(cursor)); - return Val_unit; -} - -CAMLprim value stub_mdb_cursor_renew(value txn, value cursor) { - return Val_int(mdb_cursor_renew(Txn_val(txn), Cursor_val(cursor))); -} - -CAMLprim value stub_mdb_cursor_txn(value cursor) { - CAMLparam1(cursor); - CAMLlocal1(txn); - txn = alloc_txn(mdb_cursor_txn(Cursor_val(cursor))); - CAMLreturn(txn); -} - -CAMLprim value stub_mdb_cursor_dbi(value cursor) { - return Val_int(mdb_cursor_dbi(Cursor_val(cursor))); -} - -CAMLprim value stub_mdb_cursor_get(value cursor, value key, value data, value op) { - CAMLparam4(cursor, key, data, op); - CAMLlocal2(result, tuple); - - MDB_val k, v; - int ret; - - if (Is_block(key)) { - k.mv_size = caml_string_length(Field(key, 0)); - k.mv_data = 
Bytes_val(Field(key, 0)); - } - - if (Is_block(data)) { - v.mv_size = Caml_ba_array_val(Field(data, 0))->dim[0]; - v.mv_data = Caml_ba_data_val(Field(data, 0)); - } - - ret = mdb_cursor_get(Cursor_val(cursor), &k, &v, Int_val(op)); - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - tuple = caml_alloc_tuple(2); - Store_field(tuple, 0, alloc_mdb_val_ba(&k)); - Store_field(tuple, 1, alloc_mdb_val_ba(&v)); - Store_field(result, 0, tuple); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_cursor_get_string(value cursor, value key, value data, value op) { - CAMLparam4(cursor, key, data, op); - CAMLlocal2(result, tuple); - - MDB_val k, v; - int ret; - - if (Is_block(key)) { - k.mv_size = caml_string_length(Field(key, 0)); - k.mv_data = Bytes_val(Field(key, 0)); - } - - if (Is_block(data)) { - v.mv_size = caml_string_length(Field(data, 0)); - v.mv_data = Bytes_val(Field(data, 0)); - } - - ret = mdb_cursor_get(Cursor_val(cursor), &k, &v, Int_val(op)); - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - tuple = caml_alloc_tuple(2); - Store_field(tuple, 0, alloc_mdb_val_ba(&k)); - Store_field(tuple, 1, alloc_mdb_val_ba(&v)); - Store_field(result, 0, tuple); - } - - CAMLreturn(result); -} - -CAMLprim value stub_mdb_cursor_put(value cursor, value key, value data, value flags) { - MDB_val k, v; - k.mv_size = caml_string_length(key); - k.mv_data = Bytes_val(key); - v.mv_size = Caml_ba_array_val(data)->dim[0]; - v.mv_data = Caml_ba_data_val(data); - return Val_int(mdb_cursor_put(Cursor_val(cursor), &k, &v, Int_val(flags))); -} - -CAMLprim value stub_mdb_cursor_put_string(value cursor, value key, value data, value flags) { - MDB_val k, v; - k.mv_size = caml_string_length(key); - k.mv_data = Bytes_val(key); - v.mv_size = caml_string_length(data); - v.mv_data = Bytes_val(data); - return Val_int(mdb_cursor_put(Cursor_val(cursor), &k, &v, Int_val(flags))); -} - -CAMLprim value stub_mdb_cursor_del(value cursor, value flags) { - return Val_int(mdb_cursor_del(Cursor_val(cursor), Int_val(flags))); -} - -CAMLprim value stub_mdb_cursor_count(value cursor) { - CAMLparam1(cursor); - CAMLlocal1(result); - - mdb_size_t count; - int ret; - - ret = mdb_cursor_count(Cursor_val(cursor), &count); - if (ret) { - result = caml_alloc(1, 1); - Store_field(result, 0, Val_int(ret)); - } - else { - result = caml_alloc(1, 0); - Store_field(result, 0, Val_long(count)); - } - - CAMLreturn(result); -} - -/* -------------------------------------------------------------------------- - Copyright (c) 2018 Vincent Bernardoff - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- --------------------------------------------------------------------------- */ diff --git a/vendors/ocaml-lmdb/src/mdb.c b/vendors/ocaml-lmdb/src/mdb.c deleted file mode 100644 index 85f5bc3fbaf9..000000000000 --- a/vendors/ocaml-lmdb/src/mdb.c +++ /dev/null @@ -1,11153 +0,0 @@ -/** @file mdb.c - * @brief Lightning memory-mapped database library - * - * A Btree-based database management library modeled loosely on the - * BerkeleyDB API, but much simplified. - */ -/* - * Copyright 2011-2018 Howard Chu, Symas Corp. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * . - * - * This code is derived from btree.c written by Martin Hedenfalk. - * - * Copyright (c) 2009, 2010 Martin Hedenfalk - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -#ifndef _GNU_SOURCE -#define _GNU_SOURCE 1 -#endif -#if defined(MDB_VL32) || defined(__WIN64__) -#define _FILE_OFFSET_BITS 64 -#endif -#ifdef _WIN32 -#include -#include -#include /* get wcscpy() */ - -/* We use native NT APIs to setup the memory map, so that we can - * let the DB file grow incrementally instead of always preallocating - * the full size. These APIs are defined in and - * but those headers are meant for driver-level development and - * conflict with the regular user-level headers, so we explicitly - * declare them here. We get pointers to these functions from - * NTDLL.DLL at runtime, to avoid buildtime dependencies on any - * NTDLL import libraries. - */ -typedef NTSTATUS WINAPI (NtCreateSectionFunc) - (OUT PHANDLE sh, IN ACCESS_MASK acc, - IN void * oa OPTIONAL, - IN PLARGE_INTEGER ms OPTIONAL, - IN ULONG pp, IN ULONG aa, IN HANDLE fh OPTIONAL); - -static NtCreateSectionFunc *NtCreateSection; - -typedef enum _SECTION_INHERIT { - ViewShare = 1, - ViewUnmap = 2 -} SECTION_INHERIT; - -typedef NTSTATUS WINAPI (NtMapViewOfSectionFunc) - (IN PHANDLE sh, IN HANDLE ph, - IN OUT PVOID *addr, IN ULONG_PTR zbits, - IN SIZE_T cs, IN OUT PLARGE_INTEGER off OPTIONAL, - IN OUT PSIZE_T vs, IN SECTION_INHERIT ih, - IN ULONG at, IN ULONG pp); - -static NtMapViewOfSectionFunc *NtMapViewOfSection; - -typedef NTSTATUS WINAPI (NtCloseFunc)(HANDLE h); - -static NtCloseFunc *NtClose; - -/** getpid() returns int; MinGW defines pid_t but MinGW64 typedefs it - * as int64 which is wrong. MSVC doesn't define it at all, so just - * don't use it. 
- */ -#define MDB_PID_T int -#define MDB_THR_T DWORD -#include -#include -#ifdef __GNUC__ -# include -#else -# define LITTLE_ENDIAN 1234 -# define BIG_ENDIAN 4321 -# define BYTE_ORDER LITTLE_ENDIAN -# ifndef SSIZE_MAX -# define SSIZE_MAX INT_MAX -# endif -#endif -#else -#include -#include -#define MDB_PID_T pid_t -#define MDB_THR_T pthread_t -#include -#include -#include -#ifdef HAVE_SYS_FILE_H -#include -#endif -#include -#endif - -#if defined(__mips) && defined(__linux) -/* MIPS has cache coherency issues, requires explicit cache control */ -#include -extern int cacheflush(char *addr, int nbytes, int cache); -#define CACHEFLUSH(addr, bytes, cache) cacheflush(addr, bytes, cache) -#else -#define CACHEFLUSH(addr, bytes, cache) -#endif - -#if defined(__linux) && !defined(MDB_FDATASYNC_WORKS) -/** fdatasync is broken on ext3/ext4fs on older kernels, see - * description in #mdb_env_open2 comments. You can safely - * define MDB_FDATASYNC_WORKS if this code will only be run - * on kernels 3.6 and newer. - */ -#define BROKEN_FDATASYNC -#endif - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef _MSC_VER -#include -typedef SSIZE_T ssize_t; -#else -#include -#endif - -#if defined(__sun) || defined(ANDROID) -/* Most platforms have posix_memalign, older may only have memalign */ -#define HAVE_MEMALIGN 1 -#include -/* On Solaris, we need the POSIX sigwait function */ -#if defined (__sun) -# define _POSIX_PTHREAD_SEMANTICS 1 -#endif -#endif - -#if !(defined(BYTE_ORDER) || defined(__BYTE_ORDER)) -#include -#include /* defines BYTE_ORDER on HPUX and Solaris */ -#endif - -#if defined(__APPLE__) || defined (BSD) || defined(__FreeBSD_kernel__) -# if !(defined(MDB_USE_POSIX_MUTEX) || defined(MDB_USE_POSIX_SEM)) -# define MDB_USE_SYSV_SEM 1 -# endif -# define MDB_FDATASYNC fsync -#elif defined(ANDROID) -# define MDB_FDATASYNC fsync -#endif - -#ifndef _WIN32 -#include -#include -#ifdef MDB_USE_POSIX_SEM -# define MDB_USE_HASH 1 -#include -#elif defined(MDB_USE_SYSV_SEM) -#include -#include -#ifdef _SEM_SEMUN_UNDEFINED -union semun { - int val; - struct semid_ds *buf; - unsigned short *array; -}; -#endif /* _SEM_SEMUN_UNDEFINED */ -#else -#define MDB_USE_POSIX_MUTEX 1 -#endif /* MDB_USE_POSIX_SEM */ -#endif /* !_WIN32 */ - -#if defined(_WIN32) + defined(MDB_USE_POSIX_SEM) + defined(MDB_USE_SYSV_SEM) \ - + defined(MDB_USE_POSIX_MUTEX) != 1 -# error "Ambiguous shared-lock implementation" -#endif - -#ifdef USE_VALGRIND -#include -#define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z) -#define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s) -#define VGMEMP_FREE(h,a) VALGRIND_MEMPOOL_FREE(h,a) -#define VGMEMP_DESTROY(h) VALGRIND_DESTROY_MEMPOOL(h) -#define VGMEMP_DEFINED(a,s) VALGRIND_MAKE_MEM_DEFINED(a,s) -#else -#define VGMEMP_CREATE(h,r,z) -#define VGMEMP_ALLOC(h,a,s) -#define VGMEMP_FREE(h,a) -#define VGMEMP_DESTROY(h) -#define VGMEMP_DEFINED(a,s) -#endif - -#ifndef BYTE_ORDER -# if (defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)) -/* Solaris just defines one or the other */ -# define LITTLE_ENDIAN 1234 -# define BIG_ENDIAN 4321 -# ifdef _LITTLE_ENDIAN -# define BYTE_ORDER LITTLE_ENDIAN -# else -# define BYTE_ORDER BIG_ENDIAN -# endif -# else -# define BYTE_ORDER __BYTE_ORDER -# endif -#endif - -#ifndef LITTLE_ENDIAN -#define LITTLE_ENDIAN __LITTLE_ENDIAN -#endif -#ifndef BIG_ENDIAN -#define BIG_ENDIAN __BIG_ENDIAN -#endif - -#if defined(__i386) || defined(__x86_64) || defined(_M_IX86) -#define MISALIGNED_OK 1 
-#endif - -#include "lmdb.h" -#include "midl.h" - -#if (BYTE_ORDER == LITTLE_ENDIAN) == (BYTE_ORDER == BIG_ENDIAN) -# error "Unknown or unsupported endianness (BYTE_ORDER)" -#elif (-6 & 5) || CHAR_BIT!=8 || UINT_MAX!=0xffffffff || MDB_SIZE_MAX%UINT_MAX -# error "Two's complement, reasonably sized integer types, please" -#endif - -#ifdef __GNUC__ -/** Put infrequently used env functions in separate section */ -# ifdef __APPLE__ -# define ESECT __attribute__ ((section("__TEXT,text_env"))) -# else -# define ESECT __attribute__ ((section("text_env"))) -# endif -#else -#define ESECT -#endif - -#ifdef _WIN32 -#define CALL_CONV WINAPI -#else -#define CALL_CONV -#endif - -/** @defgroup internal LMDB Internals - * @{ - */ -/** @defgroup compat Compatibility Macros - * A bunch of macros to minimize the amount of platform-specific ifdefs - * needed throughout the rest of the code. When the features this library - * needs are similar enough to POSIX to be hidden in a one-or-two line - * replacement, this macro approach is used. - * @{ - */ - - /** Features under development */ -#ifndef MDB_DEVEL -#define MDB_DEVEL 0 -#endif - - /** Wrapper around __func__, which is a C99 feature */ -#if __STDC_VERSION__ >= 199901L -# define mdb_func_ __func__ -#elif __GNUC__ >= 2 || _MSC_VER >= 1300 -# define mdb_func_ __FUNCTION__ -#else -/* If a debug message says (), update the #if statements above */ -# define mdb_func_ "" -#endif - -/* Internal error codes, not exposed outside liblmdb */ -#define MDB_NO_ROOT (MDB_LAST_ERRCODE + 10) -#ifdef _WIN32 -#define MDB_OWNERDEAD ((int) WAIT_ABANDONED) -#elif defined MDB_USE_SYSV_SEM -#define MDB_OWNERDEAD (MDB_LAST_ERRCODE + 11) -#elif defined(MDB_USE_POSIX_MUTEX) && defined(EOWNERDEAD) -#define MDB_OWNERDEAD EOWNERDEAD /**< #LOCK_MUTEX0() result if dead owner */ -#endif - -#ifdef __GLIBC__ -#define GLIBC_VER ((__GLIBC__ << 16 )| __GLIBC_MINOR__) -#endif -/** Some platforms define the EOWNERDEAD error code - * even though they don't support Robust Mutexes. - * Compile with -DMDB_USE_ROBUST=0, or use some other - * mechanism like -DMDB_USE_SYSV_SEM instead of - * -DMDB_USE_POSIX_MUTEX. (SysV semaphores are - * also Robust, but some systems don't support them - * either.) - */ -#ifndef MDB_USE_ROBUST -/* Android currently lacks Robust Mutex support. So does glibc < 2.4. */ -# if defined(MDB_USE_POSIX_MUTEX) && (defined(ANDROID) || \ - (defined(__GLIBC__) && GLIBC_VER < 0x020004)) -# define MDB_USE_ROBUST 0 -# else -# define MDB_USE_ROBUST 1 -# endif -#endif /* !MDB_USE_ROBUST */ - -#if defined(MDB_USE_POSIX_MUTEX) && (MDB_USE_ROBUST) -/* glibc < 2.12 only provided _np API */ -# if (defined(__GLIBC__) && GLIBC_VER < 0x02000c) || \ - (defined(PTHREAD_MUTEX_ROBUST_NP) && !defined(PTHREAD_MUTEX_ROBUST)) -# define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP -# define pthread_mutexattr_setrobust(attr, flag) pthread_mutexattr_setrobust_np(attr, flag) -# define pthread_mutex_consistent(mutex) pthread_mutex_consistent_np(mutex) -# endif -#endif /* MDB_USE_POSIX_MUTEX && MDB_USE_ROBUST */ - -#if defined(MDB_OWNERDEAD) && (MDB_USE_ROBUST) -#define MDB_ROBUST_SUPPORTED 1 -#endif - -#ifdef _WIN32 -#define MDB_USE_HASH 1 -#define MDB_PIDLOCK 0 -#define THREAD_RET DWORD -#define pthread_t HANDLE -#define pthread_mutex_t HANDLE -#define pthread_cond_t HANDLE -typedef HANDLE mdb_mutex_t, mdb_mutexref_t; -#define pthread_key_t DWORD -#define pthread_self() GetCurrentThreadId() -#define pthread_key_create(x,y) \ - ((*(x) = TlsAlloc()) == TLS_OUT_OF_INDEXES ? 
ErrCode() : 0) -#define pthread_key_delete(x) TlsFree(x) -#define pthread_getspecific(x) TlsGetValue(x) -#define pthread_setspecific(x,y) (TlsSetValue(x,y) ? 0 : ErrCode()) -#define pthread_mutex_unlock(x) ReleaseMutex(*x) -#define pthread_mutex_lock(x) WaitForSingleObject(*x, INFINITE) -#define pthread_cond_signal(x) SetEvent(*x) -#define pthread_cond_wait(cond,mutex) do{SignalObjectAndWait(*mutex, *cond, INFINITE, FALSE); WaitForSingleObject(*mutex, INFINITE);}while(0) -#define THREAD_CREATE(thr,start,arg) \ - (((thr) = CreateThread(NULL, 0, start, arg, 0, NULL)) ? 0 : ErrCode()) -#define THREAD_FINISH(thr) \ - (WaitForSingleObject(thr, INFINITE) ? ErrCode() : 0) -#define LOCK_MUTEX0(mutex) WaitForSingleObject(mutex, INFINITE) -#define UNLOCK_MUTEX(mutex) ReleaseMutex(mutex) -#define mdb_mutex_consistent(mutex) 0 -#define getpid() GetCurrentProcessId() -#define MDB_FDATASYNC(fd) (!FlushFileBuffers(fd)) -#define MDB_MSYNC(addr,len,flags) (!FlushViewOfFile(addr,len)) -#define ErrCode() GetLastError() -#define GET_PAGESIZE(x) {SYSTEM_INFO si; GetSystemInfo(&si); (x) = si.dwPageSize;} -#define close(fd) (CloseHandle(fd) ? 0 : -1) -#define munmap(ptr,len) UnmapViewOfFile(ptr) -#ifdef PROCESS_QUERY_LIMITED_INFORMATION -#define MDB_PROCESS_QUERY_LIMITED_INFORMATION PROCESS_QUERY_LIMITED_INFORMATION -#else -#define MDB_PROCESS_QUERY_LIMITED_INFORMATION 0x1000 -#endif -#else -#define THREAD_RET void * -#define THREAD_CREATE(thr,start,arg) pthread_create(&thr,NULL,start,arg) -#define THREAD_FINISH(thr) pthread_join(thr,NULL) - - /** For MDB_LOCK_FORMAT: True if readers take a pid lock in the lockfile */ -#define MDB_PIDLOCK 1 - -#ifdef MDB_USE_POSIX_SEM - -typedef sem_t *mdb_mutex_t, *mdb_mutexref_t; -#define LOCK_MUTEX0(mutex) mdb_sem_wait(mutex) -#define UNLOCK_MUTEX(mutex) sem_post(mutex) - -static int -mdb_sem_wait(sem_t *sem) -{ - int rc; - while ((rc = sem_wait(sem)) && (rc = errno) == EINTR) ; - return rc; -} - -#elif defined MDB_USE_SYSV_SEM - -typedef struct mdb_mutex { - int semid; - int semnum; - int *locked; -} mdb_mutex_t[1], *mdb_mutexref_t; - -#define LOCK_MUTEX0(mutex) mdb_sem_wait(mutex) -#define UNLOCK_MUTEX(mutex) do { \ - struct sembuf sb = { 0, 1, SEM_UNDO }; \ - sb.sem_num = (mutex)->semnum; \ - *(mutex)->locked = 0; \ - semop((mutex)->semid, &sb, 1); \ -} while(0) - -static int -mdb_sem_wait(mdb_mutexref_t sem) -{ - int rc, *locked = sem->locked; - struct sembuf sb = { 0, -1, SEM_UNDO }; - sb.sem_num = sem->semnum; - do { - if (!semop(sem->semid, &sb, 1)) { - rc = *locked ? MDB_OWNERDEAD : MDB_SUCCESS; - *locked = 1; - break; - } - } while ((rc = errno) == EINTR); - return rc; -} - -#define mdb_mutex_consistent(mutex) 0 - -#else /* MDB_USE_POSIX_MUTEX: */ - /** Shared mutex/semaphore as the original is stored. - * - * Not for copies. Instead it can be assigned to an #mdb_mutexref_t. - * When mdb_mutexref_t is a pointer and mdb_mutex_t is not, then it - * is array[size 1] so it can be assigned to the pointer. - */ -typedef pthread_mutex_t mdb_mutex_t[1]; - /** Reference to an #mdb_mutex_t */ -typedef pthread_mutex_t *mdb_mutexref_t; - /** Lock the reader or writer mutex. - * Returns 0 or a code to give #mdb_mutex_failed(), as in #LOCK_MUTEX(). - */ -#define LOCK_MUTEX0(mutex) pthread_mutex_lock(mutex) - /** Unlock the reader or writer mutex. - */ -#define UNLOCK_MUTEX(mutex) pthread_mutex_unlock(mutex) - /** Mark mutex-protected data as repaired, after death of previous owner. 
- */ -#define mdb_mutex_consistent(mutex) pthread_mutex_consistent(mutex) -#endif /* MDB_USE_POSIX_SEM || MDB_USE_SYSV_SEM */ - - /** Get the error code for the last failed system function. - */ -#define ErrCode() errno - - /** An abstraction for a file handle. - * On POSIX systems file handles are small integers. On Windows - * they're opaque pointers. - */ -#define HANDLE int - - /** A value for an invalid file handle. - * Mainly used to initialize file variables and signify that they are - * unused. - */ -#define INVALID_HANDLE_VALUE (-1) - - /** Get the size of a memory page for the system. - * This is the basic size that the platform's memory manager uses, and is - * fundamental to the use of memory-mapped files. - */ -#define GET_PAGESIZE(x) ((x) = sysconf(_SC_PAGE_SIZE)) -#endif - -#define Z MDB_FMT_Z /**< printf/scanf format modifier for size_t */ -#define Yu MDB_PRIy(u) /**< printf format for #mdb_size_t */ -#define Yd MDB_PRIy(d) /**< printf format for 'signed #mdb_size_t' */ - -#ifdef MDB_USE_SYSV_SEM -#define MNAME_LEN (sizeof(int)) -#else -#define MNAME_LEN (sizeof(pthread_mutex_t)) -#endif - -/** Initial part of #MDB_env.me_mutexname[]. - * Changes to this code must be reflected in #MDB_LOCK_FORMAT. - */ -#ifdef _WIN32 -#define MUTEXNAME_PREFIX "Global\\MDB" -#elif defined MDB_USE_POSIX_SEM -#define MUTEXNAME_PREFIX "/MDB" -#endif - -/** @} */ - -#ifdef MDB_ROBUST_SUPPORTED - /** Lock mutex, handle any error, set rc = result. - * Return 0 on success, nonzero (not rc) on error. - */ -#define LOCK_MUTEX(rc, env, mutex) \ - (((rc) = LOCK_MUTEX0(mutex)) && \ - ((rc) = mdb_mutex_failed(env, mutex, rc))) -static int mdb_mutex_failed(MDB_env *env, mdb_mutexref_t mutex, int rc); -#else -#define LOCK_MUTEX(rc, env, mutex) ((rc) = LOCK_MUTEX0(mutex)) -#define mdb_mutex_failed(env, mutex, rc) (rc) -#endif - -#ifndef _WIN32 -/** A flag for opening a file and requesting synchronous data writes. - * This is only used when writing a meta page. It's not strictly needed; - * we could just do a normal write and then immediately perform a flush. - * But if this flag is available it saves us an extra system call. - * - * @note If O_DSYNC is undefined but exists in /usr/include, - * preferably set some compiler flag to get the definition. - */ -#ifndef MDB_DSYNC -# ifdef O_DSYNC -# define MDB_DSYNC O_DSYNC -# else -# define MDB_DSYNC O_SYNC -# endif -#endif -#endif - -/** Function for flushing the data of a file. Define this to fsync - * if fdatasync() is not supported. - */ -#ifndef MDB_FDATASYNC -# define MDB_FDATASYNC fdatasync -#endif - -#ifndef MDB_MSYNC -# define MDB_MSYNC(addr,len,flags) msync(addr,len,flags) -#endif - -#ifndef MS_SYNC -#define MS_SYNC 1 -#endif - -#ifndef MS_ASYNC -#define MS_ASYNC 0 -#endif - - /** A page number in the database. - * Note that 64 bit page numbers are overkill, since pages themselves - * already represent 12-13 bits of addressable memory, and the OS will - * always limit applications to a maximum of 63 bits of address space. - * - * @note In the #MDB_node structure, we only store 48 bits of this value, - * which thus limits us to only 60 bits of addressable data. - */ -typedef MDB_ID pgno_t; - - /** A transaction ID. - * See struct MDB_txn.mt_txnid for details. - */ -typedef MDB_ID txnid_t; - -/** @defgroup debug Debug Macros - * @{ - */ -#ifndef MDB_DEBUG - /** Enable debug output. Needs variable argument macros (a C99 feature). - * Set this to 1 for copious tracing. 
Set to 2 to add dumps of all IDLs - * read from and written to the database (used for free space management). - */ -#define MDB_DEBUG 0 -#endif - -#if MDB_DEBUG -static int mdb_debug; -static txnid_t mdb_debug_start; - - /** Print a debug message with printf formatting. - * Requires double parenthesis around 2 or more args. - */ -# define DPRINTF(args) ((void) ((mdb_debug) && DPRINTF0 args)) -# define DPRINTF0(fmt, ...) \ - fprintf(stderr, "%s:%d " fmt "\n", mdb_func_, __LINE__, __VA_ARGS__) -#else -# define DPRINTF(args) ((void) 0) -#endif - /** Print a debug string. - * The string is printed literally, with no format processing. - */ -#define DPUTS(arg) DPRINTF(("%s", arg)) - /** Debuging output value of a cursor DBI: Negative in a sub-cursor. */ -#define DDBI(mc) \ - (((mc)->mc_flags & C_SUB) ? -(int)(mc)->mc_dbi : (int)(mc)->mc_dbi) -/** @} */ - - /** @brief The maximum size of a database page. - * - * It is 32k or 64k, since value-PAGEBASE must fit in - * #MDB_page.%mp_upper. - * - * LMDB will use database pages < OS pages if needed. - * That causes more I/O in write transactions: The OS must - * know (read) the whole page before writing a partial page. - * - * Note that we don't currently support Huge pages. On Linux, - * regular data files cannot use Huge pages, and in general - * Huge pages aren't actually pageable. We rely on the OS - * demand-pager to read our data and page it out when memory - * pressure from other processes is high. So until OSs have - * actual paging support for Huge pages, they're not viable. - */ -#define MAX_PAGESIZE (PAGEBASE ? 0x10000 : 0x8000) - - /** The minimum number of keys required in a database page. - * Setting this to a larger value will place a smaller bound on the - * maximum size of a data item. Data items larger than this size will - * be pushed into overflow pages instead of being stored directly in - * the B-tree node. This value used to default to 4. With a page size - * of 4096 bytes that meant that any item larger than 1024 bytes would - * go into an overflow page. That also meant that on average 2-3KB of - * each overflow page was wasted space. The value cannot be lower than - * 2 because then there would no longer be a tree structure. With this - * value, items larger than 2KB will go into overflow pages, and on - * average only 1KB will be wasted. - */ -#define MDB_MINKEYS 2 - - /** A stamp that identifies a file as an LMDB file. - * There's nothing special about this value other than that it is easily - * recognizable, and it will reflect any byte order mismatches. - */ -#define MDB_MAGIC 0xBEEFC0DE - - /** The version number for a database's datafile format. */ -#define MDB_DATA_VERSION ((MDB_DEVEL) ? 999 : 1) - /** The version number for a database's lockfile format. */ -#define MDB_LOCK_VERSION ((MDB_DEVEL) ? 999 : 2) - /** Number of bits representing #MDB_LOCK_VERSION in #MDB_LOCK_FORMAT. - * The remaining bits must leave room for #MDB_lock_desc. - */ -#define MDB_LOCK_VERSION_BITS 12 - - /** @brief The max size of a key we can write, or 0 for computed max. - * - * This macro should normally be left alone or set to 0. - * Note that a database with big keys or dupsort data cannot be - * reliably modified by a liblmdb which uses a smaller max. - * The default is 511 for backwards compat, or 0 when #MDB_DEVEL. - * - * Other values are allowed, for backwards compat. 
However: - * A value bigger than the computed max can break if you do not - * know what you are doing, and liblmdb <= 0.9.10 can break when - * modifying a DB with keys/dupsort data bigger than its max. - * - * Data items in an #MDB_DUPSORT database are also limited to - * this size, since they're actually keys of a sub-DB. Keys and - * #MDB_DUPSORT data items must fit on a node in a regular page. - */ -#ifndef MDB_MAXKEYSIZE -#define MDB_MAXKEYSIZE ((MDB_DEVEL) ? 0 : 511) -#endif - - /** The maximum size of a key we can write to the environment. */ -#if MDB_MAXKEYSIZE -#define ENV_MAXKEY(env) (MDB_MAXKEYSIZE) -#else -#define ENV_MAXKEY(env) ((env)->me_maxkey) -#endif - - /** @brief The maximum size of a data item. - * - * We only store a 32 bit value for node sizes. - */ -#define MAXDATASIZE 0xffffffffUL - -#if MDB_DEBUG - /** Key size which fits in a #DKBUF. - * @ingroup debug - */ -#define DKBUF_MAXKEYSIZE ((MDB_MAXKEYSIZE) > 0 ? (MDB_MAXKEYSIZE) : 511) - /** A key buffer. - * @ingroup debug - * This is used for printing a hex dump of a key's contents. - */ -#define DKBUF char kbuf[DKBUF_MAXKEYSIZE*2+1] - /** Display a key in hex. - * @ingroup debug - * Invoke a function to display a key in hex. - */ -#define DKEY(x) mdb_dkey(x, kbuf) -#else -#define DKBUF -#define DKEY(x) 0 -#endif - - /** An invalid page number. - * Mainly used to denote an empty tree. - */ -#define P_INVALID (~(pgno_t)0) - - /** Test if the flags \b f are set in a flag word \b w. */ -#define F_ISSET(w, f) (((w) & (f)) == (f)) - - /** Round \b n up to an even number. */ -#define EVEN(n) (((n) + 1U) & -2) /* sign-extending -2 to match n+1U */ - - /** Least significant 1-bit of \b n. n must be of an unsigned type. */ -#define LOW_BIT(n) ((n) & (-(n))) - - /** (log2(\b p2) % \b n), for p2 = power of 2 and 0 < n < 8. */ -#define LOG2_MOD(p2, n) (7 - 86 / ((p2) % ((1U<<(n))-1) + 11)) - /* Explanation: Let p2 = 2**(n*y + x), x> (CACHELINE>64), 5)) - + 6 * (sizeof(MDB_PID_T)/4 % 3) /* legacy(2) to word(4/8)? */ - + 18 * (sizeof(pthread_t)/4 % 5) /* can be struct{id, active data} */ - + 90 * (sizeof(MDB_txbody) / CACHELINE % 3) - + 270 * (MDB_LOCK_TYPE % 120) - /* The above is < 270*120 < 2**15 */ - + ((sizeof(txnid_t) == 8) << 15) /* 32bit/64bit */ - + ((sizeof(MDB_reader) > CACHELINE) << 16) - /* Not really needed - implied by MDB_LOCK_TYPE != (_WIN32 locking) */ - + (((MDB_PIDLOCK) != 0) << 17) - /* 18 bits total: Must be <= (32 - MDB_LOCK_VERSION_BITS). */ -}; -/** @} */ - -/** Common header for all page types. The page type depends on #mp_flags. - * - * #P_BRANCH and #P_LEAF pages have unsorted '#MDB_node's at the end, with - * sorted #mp_ptrs[] entries referring to them. Exception: #P_LEAF2 pages - * omit mp_ptrs and pack sorted #MDB_DUPFIXED values after the page header. - * - * #P_OVERFLOW records occupy one or more contiguous pages where only the - * first has a page header. They hold the real data of #F_BIGDATA nodes. - * - * #P_SUBP sub-pages are small leaf "pages" with duplicate data. - * A node with flag #F_DUPDATA but not #F_SUBDATA contains a sub-page. - * (Duplicate data can also go in sub-databases, which use normal pages.) - * - * #P_META pages contain #MDB_meta, the start point of an LMDB snapshot. - * - * Each non-metapage up to #MDB_meta.%mm_last_pg is reachable exactly once - * in the snapshot: Either used by a database or listed in a freeDB record. 
- */ -typedef struct MDB_page { -#define mp_pgno mp_p.p_pgno -#define mp_next mp_p.p_next - union { - pgno_t p_pgno; /**< page number */ - struct MDB_page *p_next; /**< for in-memory list of freed pages */ - } mp_p; - uint16_t mp_pad; /**< key size if this is a LEAF2 page */ -/** @defgroup mdb_page Page Flags - * @ingroup internal - * Flags for the page headers. - * @{ - */ -#define P_BRANCH 0x01 /**< branch page */ -#define P_LEAF 0x02 /**< leaf page */ -#define P_OVERFLOW 0x04 /**< overflow page */ -#define P_META 0x08 /**< meta page */ -#define P_DIRTY 0x10 /**< dirty page, also set for #P_SUBP pages */ -#define P_LEAF2 0x20 /**< for #MDB_DUPFIXED records */ -#define P_SUBP 0x40 /**< for #MDB_DUPSORT sub-pages */ -#define P_LOOSE 0x4000 /**< page was dirtied then freed, can be reused */ -#define P_KEEP 0x8000 /**< leave this page alone during spill */ -/** @} */ - uint16_t mp_flags; /**< @ref mdb_page */ -#define mp_lower mp_pb.pb.pb_lower -#define mp_upper mp_pb.pb.pb_upper -#define mp_pages mp_pb.pb_pages - union { - struct { - indx_t pb_lower; /**< lower bound of free space */ - indx_t pb_upper; /**< upper bound of free space */ - } pb; - uint32_t pb_pages; /**< number of overflow pages */ - } mp_pb; - indx_t mp_ptrs[1]; /**< dynamic size */ -} MDB_page; - - /** Size of the page header, excluding dynamic data at the end */ -#define PAGEHDRSZ ((unsigned) offsetof(MDB_page, mp_ptrs)) - - /** Address of first usable data byte in a page, after the header */ -#define METADATA(p) ((void *)((char *)(p) + PAGEHDRSZ)) - - /** ITS#7713, change PAGEBASE to handle 65536 byte pages */ -#define PAGEBASE ((MDB_DEVEL) ? PAGEHDRSZ : 0) - - /** Number of nodes on a page */ -#define NUMKEYS(p) (((p)->mp_lower - (PAGEHDRSZ-PAGEBASE)) >> 1) - - /** The amount of space remaining in the page */ -#define SIZELEFT(p) (indx_t)((p)->mp_upper - (p)->mp_lower) - - /** The percentage of space used in the page, in tenths of a percent. */ -#define PAGEFILL(env, p) (1000L * ((env)->me_psize - PAGEHDRSZ - SIZELEFT(p)) / \ - ((env)->me_psize - PAGEHDRSZ)) - /** The minimum page fill factor, in tenths of a percent. - * Pages emptier than this are candidates for merging. - */ -#define FILL_THRESHOLD 250 - - /** Test if a page is a leaf page */ -#define IS_LEAF(p) F_ISSET((p)->mp_flags, P_LEAF) - /** Test if a page is a LEAF2 page */ -#define IS_LEAF2(p) F_ISSET((p)->mp_flags, P_LEAF2) - /** Test if a page is a branch page */ -#define IS_BRANCH(p) F_ISSET((p)->mp_flags, P_BRANCH) - /** Test if a page is an overflow page */ -#define IS_OVERFLOW(p) F_ISSET((p)->mp_flags, P_OVERFLOW) - /** Test if a page is a sub page */ -#define IS_SUBP(p) F_ISSET((p)->mp_flags, P_SUBP) - - /** The number of overflow pages needed to store the given size. */ -#define OVPAGES(size, psize) ((PAGEHDRSZ-1 + (size)) / (psize) + 1) - - /** Link in #MDB_txn.%mt_loose_pgs list. - * Kept outside the page header, which is needed when reusing the page. - */ -#define NEXT_LOOSE_PAGE(p) (*(MDB_page **)((p) + 2)) - - /** Header for a single key/data pair within a page. - * Used in pages of type #P_BRANCH and #P_LEAF without #P_LEAF2. - * We guarantee 2-byte alignment for 'MDB_node's. - * - * #mn_lo and #mn_hi are used for data size on leaf nodes, and for child - * pgno on branch nodes. On 64 bit platforms, #mn_flags is also used - * for pgno. (Branch nodes have no flags). Lo and hi are in host byte - * order in case some accesses can be optimized to 32-bit word access. - * - * Leaf node flags describe node contents. 
#F_BIGDATA says the node's - * data part is the page number of an overflow page with actual data. - * #F_DUPDATA and #F_SUBDATA can be combined giving duplicate data in - * a sub-page/sub-database, and named databases (just #F_SUBDATA). - */ -typedef struct MDB_node { - /** part of data size or pgno - * @{ */ -#if BYTE_ORDER == LITTLE_ENDIAN - unsigned short mn_lo, mn_hi; -#else - unsigned short mn_hi, mn_lo; -#endif - /** @} */ -/** @defgroup mdb_node Node Flags - * @ingroup internal - * Flags for node headers. - * @{ - */ -#define F_BIGDATA 0x01 /**< data put on overflow page */ -#define F_SUBDATA 0x02 /**< data is a sub-database */ -#define F_DUPDATA 0x04 /**< data has duplicates */ - -/** valid flags for #mdb_node_add() */ -#define NODE_ADD_FLAGS (F_DUPDATA|F_SUBDATA|MDB_RESERVE|MDB_APPEND) - -/** @} */ - unsigned short mn_flags; /**< @ref mdb_node */ - unsigned short mn_ksize; /**< key size */ - char mn_data[1]; /**< key and data are appended here */ -} MDB_node; - - /** Size of the node header, excluding dynamic data at the end */ -#define NODESIZE offsetof(MDB_node, mn_data) - - /** Bit position of top word in page number, for shifting mn_flags */ -#define PGNO_TOPWORD ((pgno_t)-1 > 0xffffffffu ? 32 : 0) - - /** Size of a node in a branch page with a given key. - * This is just the node header plus the key, there is no data. - */ -#define INDXSIZE(k) (NODESIZE + ((k) == NULL ? 0 : (k)->mv_size)) - - /** Size of a node in a leaf page with a given key and data. - * This is node header plus key plus data size. - */ -#define LEAFSIZE(k, d) (NODESIZE + (k)->mv_size + (d)->mv_size) - - /** Address of node \b i in page \b p */ -#define NODEPTR(p, i) ((MDB_node *)((char *)(p) + (p)->mp_ptrs[i] + PAGEBASE)) - - /** Address of the key for the node */ -#define NODEKEY(node) (void *)((node)->mn_data) - - /** Address of the data for a node */ -#define NODEDATA(node) (void *)((char *)(node)->mn_data + (node)->mn_ksize) - - /** Get the page number pointed to by a branch node */ -#define NODEPGNO(node) \ - ((node)->mn_lo | ((pgno_t) (node)->mn_hi << 16) | \ - (PGNO_TOPWORD ? ((pgno_t) (node)->mn_flags << PGNO_TOPWORD) : 0)) - /** Set the page number in a branch node */ -#define SETPGNO(node,pgno) do { \ - (node)->mn_lo = (pgno) & 0xffff; (node)->mn_hi = (pgno) >> 16; \ - if (PGNO_TOPWORD) (node)->mn_flags = (pgno) >> PGNO_TOPWORD; } while(0) - - /** Get the size of the data in a leaf node */ -#define NODEDSZ(node) ((node)->mn_lo | ((unsigned)(node)->mn_hi << 16)) - /** Set the size of the data for a leaf node */ -#define SETDSZ(node,size) do { \ - (node)->mn_lo = (size) & 0xffff; (node)->mn_hi = (size) >> 16;} while(0) - /** The size of a key in a node */ -#define NODEKSZ(node) ((node)->mn_ksize) - - /** Copy a page number from src to dst */ -#ifdef MISALIGNED_OK -#define COPY_PGNO(dst,src) dst = src -#else -#if MDB_SIZE_MAX > 0xffffffffU -#define COPY_PGNO(dst,src) do { \ - unsigned short *s, *d; \ - s = (unsigned short *)&(src); \ - d = (unsigned short *)&(dst); \ - *d++ = *s++; \ - *d++ = *s++; \ - *d++ = *s++; \ - *d = *s; \ -} while (0) -#else -#define COPY_PGNO(dst,src) do { \ - unsigned short *s, *d; \ - s = (unsigned short *)&(src); \ - d = (unsigned short *)&(dst); \ - *d++ = *s++; \ - *d = *s; \ -} while (0) -#endif -#endif - /** The address of a key in a LEAF2 page. - * LEAF2 pages are used for #MDB_DUPFIXED sorted-duplicate sub-DBs. - * There are no node headers, keys are stored contiguously. 
- */ -#define LEAF2KEY(p, i, ks) ((char *)(p) + PAGEHDRSZ + ((i)*(ks))) - - /** Set the \b node's key into \b keyptr, if requested. */ -#define MDB_GET_KEY(node, keyptr) { if ((keyptr) != NULL) { \ - (keyptr)->mv_size = NODEKSZ(node); (keyptr)->mv_data = NODEKEY(node); } } - - /** Set the \b node's key into \b key. */ -#define MDB_GET_KEY2(node, key) { key.mv_size = NODEKSZ(node); key.mv_data = NODEKEY(node); } - - /** Information about a single database in the environment. */ -typedef struct MDB_db { - uint32_t md_pad; /**< also ksize for LEAF2 pages */ - uint16_t md_flags; /**< @ref mdb_dbi_open */ - uint16_t md_depth; /**< depth of this tree */ - pgno_t md_branch_pages; /**< number of internal pages */ - pgno_t md_leaf_pages; /**< number of leaf pages */ - pgno_t md_overflow_pages; /**< number of overflow pages */ - mdb_size_t md_entries; /**< number of data items */ - pgno_t md_root; /**< the root page of this tree */ -} MDB_db; - -#define MDB_VALID 0x8000 /**< DB handle is valid, for me_dbflags */ -#define PERSISTENT_FLAGS (0xffff & ~(MDB_VALID)) - /** #mdb_dbi_open() flags */ -#define VALID_FLAGS (MDB_REVERSEKEY|MDB_DUPSORT|MDB_INTEGERKEY|MDB_DUPFIXED|\ - MDB_INTEGERDUP|MDB_REVERSEDUP|MDB_CREATE) - - /** Handle for the DB used to track free pages. */ -#define FREE_DBI 0 - /** Handle for the default DB. */ -#define MAIN_DBI 1 - /** Number of DBs in metapage (free and main) - also hardcoded elsewhere */ -#define CORE_DBS 2 - - /** Number of meta pages - also hardcoded elsewhere */ -#define NUM_METAS 2 - - /** Meta page content. - * A meta page is the start point for accessing a database snapshot. - * Pages 0-1 are meta pages. Transaction N writes meta page #(N % 2). - */ -typedef struct MDB_meta { - /** Stamp identifying this as an LMDB file. It must be set - * to #MDB_MAGIC. */ - uint32_t mm_magic; - /** Version number of this file. Must be set to #MDB_DATA_VERSION. */ - uint32_t mm_version; -#ifdef MDB_VL32 - union { /* always zero since we don't support fixed mapping in MDB_VL32 */ - MDB_ID mmun_ull; - void *mmun_address; - } mm_un; -#define mm_address mm_un.mmun_address -#else - void *mm_address; /**< address for fixed mapping */ -#endif - mdb_size_t mm_mapsize; /**< size of mmap region */ - MDB_db mm_dbs[CORE_DBS]; /**< first is free space, 2nd is main db */ - /** The size of pages used in this DB */ -#define mm_psize mm_dbs[FREE_DBI].md_pad - /** Any persistent environment flags. @ref mdb_env */ -#define mm_flags mm_dbs[FREE_DBI].md_flags - /** Last used page in the datafile. - * Actually the file may be shorter if the freeDB lists the final pages. - */ - pgno_t mm_last_pg; - volatile txnid_t mm_txnid; /**< txnid that committed this page */ -} MDB_meta; - - /** Buffer for a stack-allocated meta page. - * The members define size and alignment, and silence type - * aliasing warnings. They are not used directly; that could - * mean incorrectly using several union members in parallel. - */ -typedef union MDB_metabuf { - MDB_page mb_page; - struct { - char mm_pad[PAGEHDRSZ]; - MDB_meta mm_meta; - } mb_metabuf; -} MDB_metabuf; - - /** Auxiliary DB info. - * The information here is mostly static/read-only. There is - * only a single copy of this record in the environment. 
- */ -typedef struct MDB_dbx { - MDB_val md_name; /**< name of the database */ - MDB_cmp_func *md_cmp; /**< function for comparing keys */ - MDB_cmp_func *md_dcmp; /**< function for comparing data items */ - MDB_rel_func *md_rel; /**< user relocate function */ - void *md_relctx; /**< user-provided context for md_rel */ -} MDB_dbx; - - /** A database transaction. - * Every operation requires a transaction handle. - */ -struct MDB_txn { - MDB_txn *mt_parent; /**< parent of a nested txn */ - /** Nested txn under this txn, set together with flag #MDB_TXN_HAS_CHILD */ - MDB_txn *mt_child; - pgno_t mt_next_pgno; /**< next unallocated page */ -#ifdef MDB_VL32 - pgno_t mt_last_pgno; /**< last written page */ -#endif - /** The ID of this transaction. IDs are integers incrementing from 1. - * Only committed write transactions increment the ID. If a transaction - * aborts, the ID may be re-used by the next writer. - */ - txnid_t mt_txnid; - MDB_env *mt_env; /**< the DB environment */ - /** The list of pages that became unused during this transaction. - */ - MDB_IDL mt_free_pgs; - /** The list of loose pages that became unused and may be reused - * in this transaction, linked through #NEXT_LOOSE_PAGE(page). - */ - MDB_page *mt_loose_pgs; - /** Number of loose pages (#mt_loose_pgs) */ - int mt_loose_count; - /** The sorted list of dirty pages we temporarily wrote to disk - * because the dirty list was full. page numbers in here are - * shifted left by 1, deleted slots have the LSB set. - */ - MDB_IDL mt_spill_pgs; - union { - /** For write txns: Modified pages. Sorted when not MDB_WRITEMAP. */ - MDB_ID2L dirty_list; - /** For read txns: This thread/txn's reader table slot, or NULL. */ - MDB_reader *reader; - } mt_u; - /** Array of records for each DB known in the environment. */ - MDB_dbx *mt_dbxs; - /** Array of MDB_db records for each known DB */ - MDB_db *mt_dbs; - /** Array of sequence numbers for each DB handle */ - unsigned int *mt_dbiseqs; -/** @defgroup mt_dbflag Transaction DB Flags - * @ingroup internal - * @{ - */ -#define DB_DIRTY 0x01 /**< DB was written in this txn */ -#define DB_STALE 0x02 /**< Named-DB record is older than txnID */ -#define DB_NEW 0x04 /**< Named-DB handle opened in this txn */ -#define DB_VALID 0x08 /**< DB handle is valid, see also #MDB_VALID */ -#define DB_USRVALID 0x10 /**< As #DB_VALID, but not set for #FREE_DBI */ -#define DB_DUPDATA 0x20 /**< DB is #MDB_DUPSORT data */ -/** @} */ - /** In write txns, array of cursors for each DB */ - MDB_cursor **mt_cursors; - /** Array of flags for each DB */ - unsigned char *mt_dbflags; -#ifdef MDB_VL32 - /** List of read-only pages (actually chunks) */ - MDB_ID3L mt_rpages; - /** We map chunks of 16 pages. Even though Windows uses 4KB pages, all - * mappings must begin on 64KB boundaries. So we round off all pgnos to - * a chunk boundary. We do the same on Linux for symmetry, and also to - * reduce the frequency of mmap/munmap calls. - */ -#define MDB_RPAGE_CHUNK 16 -#define MDB_TRPAGE_SIZE 4096 /**< size of #mt_rpages array of chunks */ -#define MDB_TRPAGE_MAX (MDB_TRPAGE_SIZE-1) /**< maximum chunk index */ - unsigned int mt_rpcheck; /**< threshold for reclaiming unref'd chunks */ -#endif - /** Number of DB records in use, or 0 when the txn is finished. - * This number only ever increments until the txn finishes; we - * don't decrement it when individual DB handles are closed. 
- */ - MDB_dbi mt_numdbs; - -/** @defgroup mdb_txn Transaction Flags - * @ingroup internal - * @{ - */ - /** #mdb_txn_begin() flags */ -#define MDB_TXN_BEGIN_FLAGS (MDB_NOMETASYNC|MDB_NOSYNC|MDB_RDONLY) -#define MDB_TXN_NOMETASYNC MDB_NOMETASYNC /**< don't sync meta for this txn on commit */ -#define MDB_TXN_NOSYNC MDB_NOSYNC /**< don't sync this txn on commit */ -#define MDB_TXN_RDONLY MDB_RDONLY /**< read-only transaction */ - /* internal txn flags */ -#define MDB_TXN_WRITEMAP MDB_WRITEMAP /**< copy of #MDB_env flag in writers */ -#define MDB_TXN_FINISHED 0x01 /**< txn is finished or never began */ -#define MDB_TXN_ERROR 0x02 /**< txn is unusable after an error */ -#define MDB_TXN_DIRTY 0x04 /**< must write, even if dirty list is empty */ -#define MDB_TXN_SPILLS 0x08 /**< txn or a parent has spilled pages */ -#define MDB_TXN_HAS_CHILD 0x10 /**< txn has an #MDB_txn.%mt_child */ - /** most operations on the txn are currently illegal */ -#define MDB_TXN_BLOCKED (MDB_TXN_FINISHED|MDB_TXN_ERROR|MDB_TXN_HAS_CHILD) -/** @} */ - unsigned int mt_flags; /**< @ref mdb_txn */ - /** #dirty_list room: Array size - \#dirty pages visible to this txn. - * Includes ancestor txns' dirty pages not hidden by other txns' - * dirty/spilled pages. Thus commit(nested txn) has room to merge - * dirty_list into mt_parent after freeing hidden mt_parent pages. - */ - unsigned int mt_dirty_room; -}; - -/** Enough space for 2^32 nodes with minimum of 2 keys per node. I.e., plenty. - * At 4 keys per node, enough for 2^64 nodes, so there's probably no need to - * raise this on a 64 bit machine. - */ -#define CURSOR_STACK 32 - -struct MDB_xcursor; - - /** Cursors are used for all DB operations. - * A cursor holds a path of (page pointer, key index) from the DB - * root to a position in the DB, plus other state. #MDB_DUPSORT - * cursors include an xcursor to the current data item. Write txns - * track their cursors and keep them up to date when data moves. - * Exception: An xcursor's pointer to a #P_SUBP page can be stale. - * (A node with #F_DUPDATA but no #F_SUBDATA contains a subpage). - */ -struct MDB_cursor { - /** Next cursor on this DB in this txn */ - MDB_cursor *mc_next; - /** Backup of the original cursor if this cursor is a shadow */ - MDB_cursor *mc_backup; - /** Context used for databases with #MDB_DUPSORT, otherwise NULL */ - struct MDB_xcursor *mc_xcursor; - /** The transaction that owns this cursor */ - MDB_txn *mc_txn; - /** The database handle this cursor operates on */ - MDB_dbi mc_dbi; - /** The database record for this cursor */ - MDB_db *mc_db; - /** The database auxiliary record for this cursor */ - MDB_dbx *mc_dbx; - /** The @ref mt_dbflag for this database */ - unsigned char *mc_dbflag; - unsigned short mc_snum; /**< number of pushed pages */ - unsigned short mc_top; /**< index of top page, normally mc_snum-1 */ -/** @defgroup mdb_cursor Cursor Flags - * @ingroup internal - * Cursor state flags. - * @{ - */ -#define C_INITIALIZED 0x01 /**< cursor has been initialized and is valid */ -#define C_EOF 0x02 /**< No more data */ -#define C_SUB 0x04 /**< Cursor is a sub-cursor */ -#define C_DEL 0x08 /**< last op was a cursor_del */ -#define C_UNTRACK 0x40 /**< Un-track cursor when closing */ -#define C_WRITEMAP MDB_TXN_WRITEMAP /**< Copy of txn flag */ -/** Read-only cursor into the txn's original snapshot in the map. - * Set for read-only txns, and in #mdb_page_alloc() for #FREE_DBI when - * #MDB_DEVEL & 2. Only implements code which is necessary for this. 
- */ -#define C_ORIG_RDONLY MDB_TXN_RDONLY -/** @} */ - unsigned int mc_flags; /**< @ref mdb_cursor */ - MDB_page *mc_pg[CURSOR_STACK]; /**< stack of pushed pages */ - indx_t mc_ki[CURSOR_STACK]; /**< stack of page indices */ -#ifdef MDB_VL32 - MDB_page *mc_ovpg; /**< a referenced overflow page */ -# define MC_OVPG(mc) ((mc)->mc_ovpg) -# define MC_SET_OVPG(mc, pg) ((mc)->mc_ovpg = (pg)) -#else -# define MC_OVPG(mc) ((MDB_page *)0) -# define MC_SET_OVPG(mc, pg) ((void)0) -#endif -}; - - /** Context for sorted-dup records. - * We could have gone to a fully recursive design, with arbitrarily - * deep nesting of sub-databases. But for now we only handle these - * levels - main DB, optional sub-DB, sorted-duplicate DB. - */ -typedef struct MDB_xcursor { - /** A sub-cursor for traversing the Dup DB */ - MDB_cursor mx_cursor; - /** The database record for this Dup DB */ - MDB_db mx_db; - /** The auxiliary DB record for this Dup DB */ - MDB_dbx mx_dbx; - /** The @ref mt_dbflag for this Dup DB */ - unsigned char mx_dbflag; -} MDB_xcursor; - - /** Check if there is an inited xcursor */ -#define XCURSOR_INITED(mc) \ - ((mc)->mc_xcursor && ((mc)->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) - - /** Update the xcursor's sub-page pointer, if any, in \b mc. Needed - * when the node which contains the sub-page may have moved. Called - * with leaf page \b mp = mc->mc_pg[\b top]. - */ -#define XCURSOR_REFRESH(mc, top, mp) do { \ - MDB_page *xr_pg = (mp); \ - MDB_node *xr_node; \ - if (!XCURSOR_INITED(mc) || (mc)->mc_ki[top] >= NUMKEYS(xr_pg)) break; \ - xr_node = NODEPTR(xr_pg, (mc)->mc_ki[top]); \ - if ((xr_node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) \ - (mc)->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(xr_node); \ -} while (0) - - /** State of FreeDB old pages, stored in the MDB_env */ -typedef struct MDB_pgstate { - pgno_t *mf_pghead; /**< Reclaimed freeDB pages, or NULL before use */ - txnid_t mf_pglast; /**< ID of last used record, or 0 if !mf_pghead */ -} MDB_pgstate; - - /** The database environment. */ -struct MDB_env { - HANDLE me_fd; /**< The main data file */ - HANDLE me_lfd; /**< The lock file */ - HANDLE me_mfd; /**< For writing and syncing the meta pages */ -#if defined(MDB_VL32) && defined(_WIN32) - HANDLE me_fmh; /**< File Mapping handle */ -#endif - /** Failed to update the meta page. Probably an I/O error. */ -#define MDB_FATAL_ERROR 0x80000000U - /** Some fields are initialized. 
*/ -#define MDB_ENV_ACTIVE 0x20000000U - /** me_txkey is set */ -#define MDB_ENV_TXKEY 0x10000000U - /** fdatasync is unreliable */ -#define MDB_FSYNCONLY 0x08000000U - uint32_t me_flags; /**< @ref mdb_env */ - unsigned int me_psize; /**< DB page size, inited from me_os_psize */ - unsigned int me_os_psize; /**< OS page size, from #GET_PAGESIZE */ - unsigned int me_maxreaders; /**< size of the reader table */ - /** Max #MDB_txninfo.%mti_numreaders of interest to #mdb_env_close() */ - volatile int me_close_readers; - MDB_dbi me_numdbs; /**< number of DBs opened */ - MDB_dbi me_maxdbs; /**< size of the DB table */ - MDB_PID_T me_pid; /**< process ID of this env */ - char *me_path; /**< path to the DB files */ - char *me_map; /**< the memory map of the data file */ - MDB_txninfo *me_txns; /**< the memory map of the lock file or NULL */ - MDB_meta *me_metas[NUM_METAS]; /**< pointers to the two meta pages */ - void *me_pbuf; /**< scratch area for DUPSORT put() */ - MDB_txn *me_txn; /**< current write transaction */ - MDB_txn *me_txn0; /**< prealloc'd write transaction */ - mdb_size_t me_mapsize; /**< size of the data memory map */ - off_t me_size; /**< current file size */ - pgno_t me_maxpg; /**< me_mapsize / me_psize */ - MDB_dbx *me_dbxs; /**< array of static DB info */ - uint16_t *me_dbflags; /**< array of flags from MDB_db.md_flags */ - unsigned int *me_dbiseqs; /**< array of dbi sequence numbers */ - pthread_key_t me_txkey; /**< thread-key for readers */ - txnid_t me_pgoldest; /**< ID of oldest reader last time we looked */ - MDB_pgstate me_pgstate; /**< state of old pages from freeDB */ -# define me_pglast me_pgstate.mf_pglast -# define me_pghead me_pgstate.mf_pghead - MDB_page *me_dpages; /**< list of malloc'd blocks for re-use */ - /** IDL of pages that became unused in a write txn */ - MDB_IDL me_free_pgs; - /** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. 
*/ - MDB_ID2L me_dirty_list; - /** Max number of freelist items that can fit in a single overflow page */ - int me_maxfree_1pg; - /** Max size of a node on a page */ - unsigned int me_nodemax; -#if !(MDB_MAXKEYSIZE) - unsigned int me_maxkey; /**< max size of a key */ -#endif - int me_live_reader; /**< have liveness lock in reader table */ -#ifdef _WIN32 - int me_pidquery; /**< Used in OpenProcess */ -#endif -#ifdef MDB_USE_POSIX_MUTEX /* Posix mutexes reside in shared mem */ -# define me_rmutex me_txns->mti_rmutex /**< Shared reader lock */ -# define me_wmutex me_txns->mti_wmutex /**< Shared writer lock */ -#else - mdb_mutex_t me_rmutex; - mdb_mutex_t me_wmutex; -# if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) - /** Half-initialized name of mutexes, to be completed by #MUTEXNAME() */ - char me_mutexname[sizeof(MUTEXNAME_PREFIX) + 11]; -# endif -#endif -#ifdef MDB_VL32 - MDB_ID3L me_rpages; /**< like #mt_rpages, but global to env */ - pthread_mutex_t me_rpmutex; /**< control access to #me_rpages */ -#define MDB_ERPAGE_SIZE 16384 -#define MDB_ERPAGE_MAX (MDB_ERPAGE_SIZE-1) - unsigned int me_rpcheck; -#endif - void *me_userctx; /**< User-settable context */ - MDB_assert_func *me_assert_func; /**< Callback for assertion failures */ -}; - - /** Nested transaction */ -typedef struct MDB_ntxn { - MDB_txn mnt_txn; /**< the transaction */ - MDB_pgstate mnt_pgstate; /**< parent transaction's saved freestate */ -} MDB_ntxn; - - /** max number of pages to commit in one writev() call */ -#define MDB_COMMIT_PAGES 64 -#if defined(IOV_MAX) && IOV_MAX < MDB_COMMIT_PAGES -#undef MDB_COMMIT_PAGES -#define MDB_COMMIT_PAGES IOV_MAX -#endif - - /** max bytes to write in one call */ -#define MAX_WRITE (0x40000000U >> (sizeof(ssize_t) == 4)) - - /** Check \b txn and \b dbi arguments to a function */ -#define TXN_DBI_EXIST(txn, dbi, validity) \ - ((txn) && (dbi)<(txn)->mt_numdbs && ((txn)->mt_dbflags[dbi] & (validity))) - - /** Check for misused \b dbi handles */ -#define TXN_DBI_CHANGED(txn, dbi) \ - ((txn)->mt_dbiseqs[dbi] != (txn)->mt_env->me_dbiseqs[dbi]) - -static int mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp); -static int mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp); -static int mdb_page_touch(MDB_cursor *mc); - -#define MDB_END_NAMES {"committed", "empty-commit", "abort", "reset", \ - "reset-tmp", "fail-begin", "fail-beginchild"} -enum { - /* mdb_txn_end operation number, for logging */ - MDB_END_COMMITTED, MDB_END_EMPTY_COMMIT, MDB_END_ABORT, MDB_END_RESET, - MDB_END_RESET_TMP, MDB_END_FAIL_BEGIN, MDB_END_FAIL_BEGINCHILD -}; -#define MDB_END_OPMASK 0x0F /**< mask for #mdb_txn_end() operation number */ -#define MDB_END_UPDATE 0x10 /**< update env state (DBIs) */ -#define MDB_END_FREE 0x20 /**< free txn unless it is #MDB_env.%me_txn0 */ -#define MDB_END_SLOT MDB_NOTLS /**< release any reader slot if #MDB_NOTLS */ -static void mdb_txn_end(MDB_txn *txn, unsigned mode); - -static int mdb_page_get(MDB_cursor *mc, pgno_t pgno, MDB_page **mp, int *lvl); -static int mdb_page_search_root(MDB_cursor *mc, - MDB_val *key, int modify); -#define MDB_PS_MODIFY 1 -#define MDB_PS_ROOTONLY 2 -#define MDB_PS_FIRST 4 -#define MDB_PS_LAST 8 -static int mdb_page_search(MDB_cursor *mc, - MDB_val *key, int flags); -static int mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst); - -#define MDB_SPLIT_REPLACE MDB_APPENDDUP /**< newkey is not new */ -static int mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, - pgno_t newpgno, unsigned int nflags); - -static int 
mdb_env_read_header(MDB_env *env, int prev, MDB_meta *meta); -static MDB_meta *mdb_env_pick_meta(const MDB_env *env); -static int mdb_env_write_meta(MDB_txn *txn); -#ifdef MDB_USE_POSIX_MUTEX /* Drop unused excl arg */ -# define mdb_env_close0(env, excl) mdb_env_close1(env) -#endif -static void mdb_env_close0(MDB_env *env, int excl); - -static MDB_node *mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp); -static int mdb_node_add(MDB_cursor *mc, indx_t indx, - MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags); -static void mdb_node_del(MDB_cursor *mc, int ksize); -static void mdb_node_shrink(MDB_page *mp, indx_t indx); -static int mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft); -static int mdb_node_read(MDB_cursor *mc, MDB_node *leaf, MDB_val *data); -static size_t mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data); -static size_t mdb_branch_size(MDB_env *env, MDB_val *key); - -static int mdb_rebalance(MDB_cursor *mc); -static int mdb_update_key(MDB_cursor *mc, MDB_val *key); - -static void mdb_cursor_pop(MDB_cursor *mc); -static int mdb_cursor_push(MDB_cursor *mc, MDB_page *mp); - -static int mdb_cursor_del0(MDB_cursor *mc); -static int mdb_del0(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, unsigned flags); -static int mdb_cursor_sibling(MDB_cursor *mc, int move_right); -static int mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op); -static int mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op); -static int mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op, - int *exactp); -static int mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data); -static int mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data); - -static void mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx); -static void mdb_xcursor_init0(MDB_cursor *mc); -static void mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node); -static void mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int force); - -static int mdb_drop0(MDB_cursor *mc, int subs); -static void mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi); -static int mdb_reader_check0(MDB_env *env, int rlocked, int *dead); - -/** @cond */ -static MDB_cmp_func mdb_cmp_memn, mdb_cmp_memnr, mdb_cmp_int, mdb_cmp_cint, mdb_cmp_long; -/** @endcond */ - -/** Compare two items pointing at '#mdb_size_t's of unknown alignment. */ -#ifdef MISALIGNED_OK -# define mdb_cmp_clong mdb_cmp_long -#else -# define mdb_cmp_clong mdb_cmp_cint -#endif - -/** True if we need #mdb_cmp_clong() instead of \b cmp for #MDB_INTEGERDUP */ -#define NEED_CMP_CLONG(cmp, ksize) \ - (UINT_MAX < MDB_SIZE_MAX && \ - (cmp) == mdb_cmp_int && (ksize) == sizeof(mdb_size_t)) - -#ifdef _WIN32 -static SECURITY_DESCRIPTOR mdb_null_sd; -static SECURITY_ATTRIBUTES mdb_all_sa; -static int mdb_sec_inited; - -struct MDB_name; -static int utf8_to_utf16(const char *src, struct MDB_name *dst, int xtra); -#endif - -/** Return the library version info. 
*/ -char * ESECT -mdb_version(int *major, int *minor, int *patch) -{ - if (major) *major = MDB_VERSION_MAJOR; - if (minor) *minor = MDB_VERSION_MINOR; - if (patch) *patch = MDB_VERSION_PATCH; - return MDB_VERSION_STRING; -} - -/** Table of descriptions for LMDB @ref errors */ -static char *const mdb_errstr[] = { - "MDB_KEYEXIST: Key/data pair already exists", - "MDB_NOTFOUND: No matching key/data pair found", - "MDB_PAGE_NOTFOUND: Requested page not found", - "MDB_CORRUPTED: Located page was wrong type", - "MDB_PANIC: Update of meta page failed or environment had fatal error", - "MDB_VERSION_MISMATCH: Database environment version mismatch", - "MDB_INVALID: File is not an LMDB file", - "MDB_MAP_FULL: Environment mapsize limit reached", - "MDB_DBS_FULL: Environment maxdbs limit reached", - "MDB_READERS_FULL: Environment maxreaders limit reached", - "MDB_TLS_FULL: Thread-local storage keys full - too many environments open", - "MDB_TXN_FULL: Transaction has too many dirty pages - transaction too big", - "MDB_CURSOR_FULL: Internal error - cursor stack limit reached", - "MDB_PAGE_FULL: Internal error - page has no more space", - "MDB_MAP_RESIZED: Database contents grew beyond environment mapsize", - "MDB_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed", - "MDB_BAD_RSLOT: Invalid reuse of reader locktable slot", - "MDB_BAD_TXN: Transaction must abort, has a child, or is invalid", - "MDB_BAD_VALSIZE: Unsupported size of key/DB name/data, or wrong DUPFIXED size", - "MDB_BAD_DBI: The specified DBI handle was closed/changed unexpectedly", - "MDB_PROBLEM: Unexpected problem - txn should abort", -}; - -char * -mdb_strerror(int err) -{ -#ifdef _WIN32 - /** HACK: pad 4KB on stack over the buf. Return system msgs in buf. - * This works as long as no function between the call to mdb_strerror - * and the actual use of the message uses more than 4K of stack. - */ -#define MSGSIZE 1024 -#define PADSIZE 4096 - char buf[MSGSIZE+PADSIZE], *ptr = buf; -#endif - int i; - if (!err) - return ("Successful return: 0"); - - if (err >= MDB_KEYEXIST && err <= MDB_LAST_ERRCODE) { - i = err - MDB_KEYEXIST; - return mdb_errstr[i]; - } - -#ifdef _WIN32 - /* These are the C-runtime error codes we use. The comment indicates - * their numeric value, and the Win32 error they would correspond to - * if the error actually came from a Win32 API. A major mess, we should - * have used LMDB-specific error codes for everything. - */ - switch(err) { - case ENOENT: /* 2, FILE_NOT_FOUND */ - case EIO: /* 5, ACCESS_DENIED */ - case ENOMEM: /* 12, INVALID_ACCESS */ - case EACCES: /* 13, INVALID_DATA */ - case EBUSY: /* 16, CURRENT_DIRECTORY */ - case EINVAL: /* 22, BAD_COMMAND */ - case ENOSPC: /* 28, OUT_OF_PAPER */ - return strerror(err); - default: - ; - } - buf[0] = 0; - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, err, 0, ptr, MSGSIZE, (va_list *)buf+MSGSIZE); - return ptr; -#else - return strerror(err); -#endif -} - -/** assert(3) variant in cursor context */ -#define mdb_cassert(mc, expr) mdb_assert0((mc)->mc_txn->mt_env, expr, #expr) -/** assert(3) variant in transaction context */ -#define mdb_tassert(txn, expr) mdb_assert0((txn)->mt_env, expr, #expr) -/** assert(3) variant in environment context */ -#define mdb_eassert(env, expr) mdb_assert0(env, expr, #expr) - -#ifndef NDEBUG -# define mdb_assert0(env, expr, expr_txt) ((expr) ? 
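/* Illustrative aside, not part of the deleted file: the table above backs the public mdb_strerror(). A minimal usage sketch against the public LMDB API; "./db" is a placeholder directory that must already exist. */
#include <stdio.h>
#include <stdlib.h>
#include <lmdb.h>

static void ok(int rc, const char *what)
{
	if (rc != MDB_SUCCESS) {
		fprintf(stderr, "%s: %s\n", what, mdb_strerror(rc));
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	MDB_env *env;
	ok(mdb_env_create(&env), "mdb_env_create");
	ok(mdb_env_open(env, "./db", 0, 0664), "mdb_env_open");
	mdb_env_close(env);
	return 0;
}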
(void)0 : \ - mdb_assert_fail(env, expr_txt, mdb_func_, __FILE__, __LINE__)) - -static void ESECT -mdb_assert_fail(MDB_env *env, const char *expr_txt, - const char *func, const char *file, int line) -{ - char buf[400]; - sprintf(buf, "%.100s:%d: Assertion '%.200s' failed in %.40s()", - file, line, expr_txt, func); - if (env->me_assert_func) - env->me_assert_func(env, buf); - fprintf(stderr, "%s\n", buf); - abort(); -} -#else -# define mdb_assert0(env, expr, expr_txt) ((void) 0) -#endif /* NDEBUG */ - -#if MDB_DEBUG -/** Return the page number of \b mp which may be sub-page, for debug output */ -static pgno_t -mdb_dbg_pgno(MDB_page *mp) -{ - pgno_t ret; - COPY_PGNO(ret, mp->mp_pgno); - return ret; -} - -/** Display a key in hexadecimal and return the address of the result. - * @param[in] key the key to display - * @param[in] buf the buffer to write into. Should always be #DKBUF. - * @return The key in hexadecimal form. - */ -char * -mdb_dkey(MDB_val *key, char *buf) -{ - char *ptr = buf; - unsigned char *c = key->mv_data; - unsigned int i; - - if (!key) - return ""; - - if (key->mv_size > DKBUF_MAXKEYSIZE) - return "MDB_MAXKEYSIZE"; - /* may want to make this a dynamic check: if the key is mostly - * printable characters, print it as-is instead of converting to hex. - */ -#if 1 - buf[0] = '\0'; - for (i=0; i<key->mv_size; i++) - ptr += sprintf(ptr, "%02x", *c++); -#else - sprintf(buf, "%.*s", key->mv_size, key->mv_data); -#endif - return buf; -} - -static const char * -mdb_leafnode_type(MDB_node *n) -{ - static char *const tp[2][2] = {{"", ": DB"}, {": sub-page", ": sub-DB"}}; - return F_ISSET(n->mn_flags, F_BIGDATA) ? ": overflow page" : - tp[F_ISSET(n->mn_flags, F_DUPDATA)][F_ISSET(n->mn_flags, F_SUBDATA)]; -} - -/** Display all the keys in the page. */ -void -mdb_page_list(MDB_page *mp) -{ - pgno_t pgno = mdb_dbg_pgno(mp); - const char *type, *state = (mp->mp_flags & P_DIRTY) ? ", dirty" : ""; - MDB_node *node; - unsigned int i, nkeys, nsize, total = 0; - MDB_val key; - DKBUF; - - switch (mp->mp_flags & (P_BRANCH|P_LEAF|P_LEAF2|P_META|P_OVERFLOW|P_SUBP)) { - case P_BRANCH: type = "Branch page"; break; - case P_LEAF: type = "Leaf page"; break; - case P_LEAF|P_SUBP: type = "Sub-page"; break; - case P_LEAF|P_LEAF2: type = "LEAF2 page"; break; - case P_LEAF|P_LEAF2|P_SUBP: type = "LEAF2 sub-page"; break; - case P_OVERFLOW: - fprintf(stderr, "Overflow page %"Yu" pages %u%s\n", - pgno, mp->mp_pages, state); - return; - case P_META: - fprintf(stderr, "Meta-page %"Yu" txnid %"Yu"\n", - pgno, ((MDB_meta *)METADATA(mp))->mm_txnid); - return; - default: - fprintf(stderr, "Bad page %"Yu" flags 0x%X\n", pgno, mp->mp_flags); - return; - } - - nkeys = NUMKEYS(mp); - fprintf(stderr, "%s %"Yu" numkeys %d%s\n", type, pgno, nkeys, state); - - for (i=0; i<nkeys; i++) { - if (IS_LEAF2(mp)) { /* LEAF2 pages have no mp_ptrs[] or node headers */ - key.mv_size = nsize = mp->mp_pad; - key.mv_data = LEAF2KEY(mp, i, nsize); - total += nsize; - fprintf(stderr, "key %d: nsize %d, %s\n", i, nsize, DKEY(&key)); - continue; - } - node = NODEPTR(mp, i); - key.mv_size = node->mn_ksize; - key.mv_data = node->mn_data; - nsize = NODESIZE + key.mv_size; - if (IS_BRANCH(mp)) { - fprintf(stderr, "key %d: page %"Yu", %s\n", i, NODEPGNO(node), - DKEY(&key)); - total += nsize; - } else { - if (F_ISSET(node->mn_flags, F_BIGDATA)) - nsize += sizeof(pgno_t); - else - nsize += NODEDSZ(node); - total += nsize; - nsize += sizeof(indx_t); - fprintf(stderr, "key %d: nsize %d, %s%s\n", - i, nsize, DKEY(&key), mdb_leafnode_type(node)); - } - total = EVEN(total); - } - fprintf(stderr, "Total: header %d + contents %d + unused %d\n", - IS_LEAF2(mp) ?
PAGEHDRSZ : PAGEBASE + mp->mp_lower, total, SIZELEFT(mp)); -} - -void -mdb_cursor_chk(MDB_cursor *mc) -{ - unsigned int i; - MDB_node *node; - MDB_page *mp; - - if (!mc->mc_snum || !(mc->mc_flags & C_INITIALIZED)) return; - for (i=0; i<mc->mc_top; i++) { - mp = mc->mc_pg[i]; - node = NODEPTR(mp, mc->mc_ki[i]); - if (NODEPGNO(node) != mc->mc_pg[i+1]->mp_pgno) - printf("oops!\n"); - } - if (mc->mc_ki[i] >= NUMKEYS(mc->mc_pg[i])) - printf("ack!\n"); - if (XCURSOR_INITED(mc)) { - node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) && - mc->mc_xcursor->mx_cursor.mc_pg[0] != NODEDATA(node)) { - printf("blah!\n"); - } - } -} -#endif - -#if (MDB_DEBUG) > 2 -/** Count all the pages in each DB and in the freelist - * and make sure it matches the actual number of pages - * being used. - * All named DBs must be open for a correct count. - */ -static void mdb_audit(MDB_txn *txn) -{ - MDB_cursor mc; - MDB_val key, data; - MDB_ID freecount, count; - MDB_dbi i; - int rc; - - freecount = 0; - mdb_cursor_init(&mc, txn, FREE_DBI, NULL); - while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0) - freecount += *(MDB_ID *)data.mv_data; - mdb_tassert(txn, rc == MDB_NOTFOUND); - - count = 0; - for (i = 0; i<txn->mt_numdbs; i++) { - MDB_xcursor mx; - if (!(txn->mt_dbflags[i] & DB_VALID)) - continue; - mdb_cursor_init(&mc, txn, i, &mx); - if (txn->mt_dbs[i].md_root == P_INVALID) - continue; - count += txn->mt_dbs[i].md_branch_pages + - txn->mt_dbs[i].md_leaf_pages + - txn->mt_dbs[i].md_overflow_pages; - if (txn->mt_dbs[i].md_flags & MDB_DUPSORT) { - rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST); - for (; rc == MDB_SUCCESS; rc = mdb_cursor_sibling(&mc, 1)) { - unsigned j; - MDB_page *mp; - mp = mc.mc_pg[mc.mc_top]; - for (j=0; j<NUMKEYS(mp); j++) { - MDB_node *leaf = NODEPTR(mp, j); - if (leaf->mn_flags & F_SUBDATA) { - MDB_db db; - memcpy(&db, NODEDATA(leaf), sizeof(db)); - count += db.md_branch_pages + db.md_leaf_pages + - db.md_overflow_pages; - } - } - } - mdb_tassert(txn, rc == MDB_NOTFOUND); - } - } - if (freecount + count + NUM_METAS != txn->mt_next_pgno) { - fprintf(stderr, "audit: %"Yu" freecount: %"Yu" count: %"Yu" total: %"Yu" next_pgno: %"Yu"\n", - txn->mt_txnid, freecount, count+NUM_METAS, - freecount+count+NUM_METAS, txn->mt_next_pgno); - } -} -#endif - -int -mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b) -{ - return txn->mt_dbxs[dbi].md_cmp(a, b); -} - -int -mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b) -{ - MDB_cmp_func *dcmp = txn->mt_dbxs[dbi].md_dcmp; - if (NEED_CMP_CLONG(dcmp, a->mv_size)) - dcmp = mdb_cmp_clong; - return dcmp(a, b); -} - -/** Allocate memory for a page. - * Re-use old malloc'd pages first for singletons, otherwise just malloc. - * Set #MDB_TXN_ERROR on failure. - */ -static MDB_page * -mdb_page_malloc(MDB_txn *txn, unsigned num) -{ - MDB_env *env = txn->mt_env; - MDB_page *ret = env->me_dpages; - size_t psize = env->me_psize, sz = psize, off; - /* For ! #MDB_NOMEMINIT, psize counts how much to init. - * For a single page alloc, we init everything after the page header. - * For multi-page, we init the final page; if the caller needed that - * many pages they will be filling in at least up to the last page.
- */ - if (num == 1) { - if (ret) { - VGMEMP_ALLOC(env, ret, sz); - VGMEMP_DEFINED(ret, sizeof(ret->mp_next)); - env->me_dpages = ret->mp_next; - return ret; - } - psize -= off = PAGEHDRSZ; - } else { - sz *= num; - off = sz - psize; - } - if ((ret = malloc(sz)) != NULL) { - VGMEMP_ALLOC(env, ret, sz); - if (!(env->me_flags & MDB_NOMEMINIT)) { - memset((char *)ret + off, 0, psize); - ret->mp_pad = 0; - } - } else { - txn->mt_flags |= MDB_TXN_ERROR; - } - return ret; -} -/** Free a single page. - * Saves single pages to a list, for future reuse. - * (This is not used for multi-page overflow pages.) - */ -static void -mdb_page_free(MDB_env *env, MDB_page *mp) -{ - mp->mp_next = env->me_dpages; - VGMEMP_FREE(env, mp); - env->me_dpages = mp; -} - -/** Free a dirty page */ -static void -mdb_dpage_free(MDB_env *env, MDB_page *dp) -{ - if (!IS_OVERFLOW(dp) || dp->mp_pages == 1) { - mdb_page_free(env, dp); - } else { - /* large pages just get freed directly */ - VGMEMP_FREE(env, dp); - free(dp); - } -} - -/** Return all dirty pages to dpage list */ -static void -mdb_dlist_free(MDB_txn *txn) -{ - MDB_env *env = txn->mt_env; - MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned i, n = dl[0].mid; - - for (i = 1; i <= n; i++) { - mdb_dpage_free(env, dl[i].mptr); - } - dl[0].mid = 0; -} - -#ifdef MDB_VL32 -static void -mdb_page_unref(MDB_txn *txn, MDB_page *mp) -{ - pgno_t pgno; - MDB_ID3L tl = txn->mt_rpages; - unsigned x, rem; - if (mp->mp_flags & (P_SUBP|P_DIRTY)) - return; - rem = mp->mp_pgno & (MDB_RPAGE_CHUNK-1); - pgno = mp->mp_pgno ^ rem; - x = mdb_mid3l_search(tl, pgno); - if (x != tl[0].mid && tl[x+1].mid == mp->mp_pgno) - x++; - if (tl[x].mref) - tl[x].mref--; -} -#define MDB_PAGE_UNREF(txn, mp) mdb_page_unref(txn, mp) - -static void -mdb_cursor_unref(MDB_cursor *mc) -{ - int i; - if (mc->mc_txn->mt_rpages[0].mid) { - if (!mc->mc_snum || !mc->mc_pg[0] || IS_SUBP(mc->mc_pg[0])) - return; - for (i=0; i<mc->mc_snum; i++) - mdb_page_unref(mc->mc_txn, mc->mc_pg[i]); - if (mc->mc_ovpg) { - mdb_page_unref(mc->mc_txn, mc->mc_ovpg); - mc->mc_ovpg = 0; - } - } - mc->mc_snum = mc->mc_top = 0; - mc->mc_pg[0] = NULL; - mc->mc_flags &= ~C_INITIALIZED; -} -#define MDB_CURSOR_UNREF(mc, force) \ - (((force) || ((mc)->mc_flags & C_INITIALIZED)) \ - ? mdb_cursor_unref(mc) \ - : (void)0) - -#else -#define MDB_PAGE_UNREF(txn, mp) -#define MDB_CURSOR_UNREF(mc, force) ((void)0) -#endif /* MDB_VL32 */ - -/** Loosen or free a single page. - * Saves single pages to a list for future reuse - * in this same txn. It has been pulled from the freeDB - * and already resides on the dirty list, but has been - * deleted. Use these pages first before pulling again - * from the freeDB. - * - * If the page wasn't dirtied in this txn, just add it - * to this txn's free list. - */ -static int -mdb_page_loose(MDB_cursor *mc, MDB_page *mp) -{ - int loose = 0; - pgno_t pgno = mp->mp_pgno; - MDB_txn *txn = mc->mc_txn; - - if ((mp->mp_flags & P_DIRTY) && mc->mc_dbi != FREE_DBI) { - if (txn->mt_parent) { - MDB_ID2 *dl = txn->mt_u.dirty_list; - /* If txn has a parent, make sure the page is in our - * dirty list. - */ - if (dl[0].mid) { - unsigned x = mdb_mid2l_search(dl, pgno); - if (x <= dl[0].mid && dl[x].mid == pgno) { - if (mp != dl[x].mptr) { /* bad cursor?
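/* Illustrative aside, not part of the deleted file: the me_dpages logic above is an intrusive free list of single pages. A generic sketch of the same pattern; names are hypothetical, not LMDB's, and page_size must be at least sizeof(struct page). */
#include <stdlib.h>

struct page { struct page *next; /* payload follows the header */ };
struct page_pool { struct page *free_list; size_t page_size; };

static struct page *pool_alloc(struct page_pool *p)
{
	struct page *pg = p->free_list;
	if (pg) {			/* reuse a previously freed page */
		p->free_list = pg->next;
		return pg;
	}
	return malloc(p->page_size);	/* else fall back to malloc() */
}

static void pool_free(struct page_pool *p, struct page *pg)
{
	pg->next = p->free_list;	/* push for the next pool_alloc() */
	p->free_list = pg;
}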
*/ - mc->mc_flags &= ~(C_INITIALIZED|C_EOF); - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PROBLEM; - } - /* ok, it's ours */ - loose = 1; - } - } - } else { - /* no parent txn, so it's just ours */ - loose = 1; - } - } - if (loose) { - DPRINTF(("loosen db %d page %"Yu, DDBI(mc), mp->mp_pgno)); - NEXT_LOOSE_PAGE(mp) = txn->mt_loose_pgs; - txn->mt_loose_pgs = mp; - txn->mt_loose_count++; - mp->mp_flags |= P_LOOSE; - } else { - int rc = mdb_midl_append(&txn->mt_free_pgs, pgno); - if (rc) - return rc; - } - - return MDB_SUCCESS; -} - -/** Set or clear P_KEEP in dirty, non-overflow, non-sub pages watched by txn. - * @param[in] mc A cursor handle for the current operation. - * @param[in] pflags Flags of the pages to update: - * P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it. - * @param[in] all No shortcuts. Needed except after a full #mdb_page_flush(). - * @return 0 on success, non-zero on failure. - */ -static int -mdb_pages_xkeep(MDB_cursor *mc, unsigned pflags, int all) -{ - enum { Mask = P_SUBP|P_DIRTY|P_LOOSE|P_KEEP }; - MDB_txn *txn = mc->mc_txn; - MDB_cursor *m3, *m0 = mc; - MDB_xcursor *mx; - MDB_page *dp, *mp; - MDB_node *leaf; - unsigned i, j; - int rc = MDB_SUCCESS, level; - - /* Mark pages seen by cursors: First m0, then tracked cursors */ - for (i = txn->mt_numdbs;; ) { - if (mc->mc_flags & C_INITIALIZED) { - for (m3 = mc;; m3 = &mx->mx_cursor) { - mp = NULL; - for (j=0; j<m3->mc_snum; j++) { - mp = m3->mc_pg[j]; - if ((mp->mp_flags & Mask) == pflags) - mp->mp_flags ^= P_KEEP; - } - mx = m3->mc_xcursor; - /* Proceed to mx if it is at a sub-database */ - if (! (mx && (mx->mx_cursor.mc_flags & C_INITIALIZED))) - break; - if (! (mp && (mp->mp_flags & P_LEAF))) - break; - leaf = NODEPTR(mp, m3->mc_ki[j-1]); - if (!(leaf->mn_flags & F_SUBDATA)) - break; - } - } - mc = mc->mc_next; - for (; !mc || mc == m0; mc = txn->mt_cursors[--i]) - if (i == 0) - goto mark_done; - } - -mark_done: - if (all) { - /* Mark dirty root pages */ - for (i=0; i<txn->mt_numdbs; i++) { - if (txn->mt_dbflags[i] & DB_DIRTY) { - pgno_t pgno = txn->mt_dbs[i].md_root; - if (pgno == P_INVALID) - continue; - if ((rc = mdb_page_get(m0, pgno, &dp, &level)) != MDB_SUCCESS) - break; - if ((dp->mp_flags & Mask) == pflags && level <= 1) - dp->mp_flags ^= P_KEEP; - } - } - } - - return rc; -} - -static int mdb_page_flush(MDB_txn *txn, int keep); - -/** Spill pages from the dirty list back to disk. - * This is intended to prevent running into #MDB_TXN_FULL situations, - * but note that they may still occur in a few cases: - * 1) our estimate of the txn size could be too small. Currently this - * seems unlikely, except with a large number of #MDB_MULTIPLE items. - * 2) child txns may run out of space if their parents dirtied a - * lot of pages and never spilled them. TODO: we probably should do - * a preemptive spill during #mdb_txn_begin() of a child txn, if - * the parent's dirty_room is below a given threshold. - * - * Otherwise, if not using nested txns, it is expected that apps will - * not run into #MDB_TXN_FULL any more. The pages are flushed to disk - * the same way as for a txn commit, e.g. their P_DIRTY flag is cleared. - * If the txn never references them again, they can be left alone. - * If the txn only reads them, they can be used without any fuss. - * If the txn writes them again, they can be dirtied immediately without - * going thru all of the work of #mdb_page_touch(). Such references are - * handled by #mdb_page_unspill().
- * - * Also note, we never spill DB root pages, nor pages of active cursors, - * because we'll need these back again soon anyway. And in nested txns, - * we can't spill a page in a child txn if it was already spilled in a - * parent txn. That would alter the parent txns' data even though - * the child hasn't committed yet, and we'd have no way to undo it if - * the child aborted. - * - * @param[in] m0 cursor A cursor handle identifying the transaction and - * database for which we are checking space. - * @param[in] key For a put operation, the key being stored. - * @param[in] data For a put operation, the data being stored. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_spill(MDB_cursor *m0, MDB_val *key, MDB_val *data) -{ - MDB_txn *txn = m0->mc_txn; - MDB_page *dp; - MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned int i, j, need; - int rc; - - if (m0->mc_flags & C_SUB) - return MDB_SUCCESS; - - /* Estimate how much space this op will take */ - i = m0->mc_db->md_depth; - /* Named DBs also dirty the main DB */ - if (m0->mc_dbi >= CORE_DBS) - i += txn->mt_dbs[MAIN_DBI].md_depth; - /* For puts, roughly factor in the key+data size */ - if (key) - i += (LEAFSIZE(key, data) + txn->mt_env->me_psize) / txn->mt_env->me_psize; - i += i; /* double it for good measure */ - need = i; - - if (txn->mt_dirty_room > i) - return MDB_SUCCESS; - - if (!txn->mt_spill_pgs) { - txn->mt_spill_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX); - if (!txn->mt_spill_pgs) - return ENOMEM; - } else { - /* purge deleted slots */ - MDB_IDL sl = txn->mt_spill_pgs; - unsigned int num = sl[0]; - j=0; - for (i=1; i<=num; i++) { - if (!(sl[i] & 1)) - sl[++j] = sl[i]; - } - sl[0] = j; - } - - /* Preserve pages which may soon be dirtied again */ - if ((rc = mdb_pages_xkeep(m0, P_DIRTY, 1)) != MDB_SUCCESS) - goto done; - - /* Less aggressive spill - we originally spilled the entire dirty list, - * with a few exceptions for cursor pages and DB root pages. But this - * turns out to be a lot of wasted effort because in a large txn many - * of those pages will need to be used again. So now we spill only 1/8th - * of the dirty pages. Testing revealed this to be a good tradeoff, - * better than 1/2, 1/4, or 1/10. - */ - if (need < MDB_IDL_UM_MAX / 8) - need = MDB_IDL_UM_MAX / 8; - - /* Save the page IDs of all the pages we're flushing */ - /* flush from the tail forward, this saves a lot of shifting later on. */ - for (i=dl[0].mid; i && need; i--) { - MDB_ID pn = dl[i].mid << 1; - dp = dl[i].mptr; - if (dp->mp_flags & (P_LOOSE|P_KEEP)) - continue; - /* Can't spill twice, make sure it's not already in a parent's - * spill list. - */ - if (txn->mt_parent) { - MDB_txn *tx2; - for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) { - if (tx2->mt_spill_pgs) { - j = mdb_midl_search(tx2->mt_spill_pgs, pn); - if (j <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[j] == pn) { - dp->mp_flags |= P_KEEP; - break; - } - } - } - if (tx2) - continue; - } - if ((rc = mdb_midl_append(&txn->mt_spill_pgs, pn))) - goto done; - need--; - } - mdb_midl_sort(txn->mt_spill_pgs); - - /* Flush the spilled part of dirty list */ - if ((rc = mdb_page_flush(txn, i)) != MDB_SUCCESS) - goto done; - - /* Reset any dirty pages we kept that page_flush didn't see */ - rc = mdb_pages_xkeep(m0, P_DIRTY|P_KEEP, i); - -done: - txn->mt_flags |= rc ? MDB_TXN_ERROR : MDB_TXN_SPILLS; - return rc; -} - -/** Find oldest txnid still referenced. Expects txn->mt_txnid > 0. 
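/* Illustrative aside, not part of the deleted file: from the caller's side, the usual way to stay clear of MDB_TXN_FULL is to bound the number of dirty pages per transaction. A sketch with the public LMDB API; put_in_batches() is a hypothetical helper and error handling is minimal. */
#include <stddef.h>
#include <lmdb.h>

static int put_in_batches(MDB_env *env, MDB_dbi dbi, MDB_val *keys,
	MDB_val *vals, size_t n, size_t batch)
{
	MDB_txn *txn = NULL;
	int rc = 0;
	size_t i;

	for (i = 0; i < n && rc == 0; i++) {
		if (!txn)
			rc = mdb_txn_begin(env, NULL, 0, &txn);
		if (!rc)
			rc = mdb_put(txn, dbi, &keys[i], &vals[i], 0);
		if (!rc && ((i + 1) % batch == 0 || i + 1 == n)) {
			rc = mdb_txn_commit(txn);	/* commit frees txn either way */
			txn = NULL;
		}
	}
	if (txn)
		mdb_txn_abort(txn);	/* abandon the failed batch */
	return rc;
}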
*/ -static txnid_t -mdb_find_oldest(MDB_txn *txn) -{ - int i; - txnid_t mr, oldest = txn->mt_txnid - 1; - if (txn->mt_env->me_txns) { - MDB_reader *r = txn->mt_env->me_txns->mti_readers; - for (i = txn->mt_env->me_txns->mti_numreaders; --i >= 0; ) { - if (r[i].mr_pid) { - mr = r[i].mr_txnid; - if (oldest > mr) - oldest = mr; - } - } - } - return oldest; -} - -/** Add a page to the txn's dirty list */ -static void -mdb_page_dirty(MDB_txn *txn, MDB_page *mp) -{ - MDB_ID2 mid; - int rc, (*insert)(MDB_ID2L, MDB_ID2 *); - - if (txn->mt_flags & MDB_TXN_WRITEMAP) { - insert = mdb_mid2l_append; - } else { - insert = mdb_mid2l_insert; - } - mid.mid = mp->mp_pgno; - mid.mptr = mp; - rc = insert(txn->mt_u.dirty_list, &mid); - mdb_tassert(txn, rc == 0); - txn->mt_dirty_room--; -} - -/** Allocate page numbers and memory for writing. Maintain me_pglast, - * me_pghead and mt_next_pgno. Set #MDB_TXN_ERROR on failure. - * - * If there are free pages available from older transactions, they - * are re-used first. Otherwise allocate a new page at mt_next_pgno. - * Do not modify the freedB, just merge freeDB records into me_pghead[] - * and move me_pglast to say which records were consumed. Only this - * function can create me_pghead and move me_pglast/mt_next_pgno. - * When #MDB_DEVEL & 2, it is not affected by #mdb_freelist_save(): it - * then uses the transaction's original snapshot of the freeDB. - * @param[in] mc cursor A cursor handle identifying the transaction and - * database for which we are allocating. - * @param[in] num the number of pages to allocate. - * @param[out] mp Address of the allocated page(s). Requests for multiple pages - * will always be satisfied by a single contiguous chunk of memory. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp) -{ -#ifdef MDB_PARANOID /* Seems like we can ignore this now */ - /* Get at most <Max_retries> more freeDB records once me_pghead - * has enough pages. If not enough, use new pages from the map. - * If <Paranoid> and mc is updating the freeDB, only get new - * records if me_pghead is empty. Then the freelist cannot play - * catch-up with itself by growing while trying to save it. - */ - enum { Paranoid = 1, Max_retries = 500 }; -#else - enum { Paranoid = 0, Max_retries = INT_MAX /*infinite*/ }; -#endif - int rc, retry = num * 60; - MDB_txn *txn = mc->mc_txn; - MDB_env *env = txn->mt_env; - pgno_t pgno, *mop = env->me_pghead; - unsigned i, j, mop_len = mop ? mop[0] : 0, n2 = num-1; - MDB_page *np; - txnid_t oldest = 0, last; - MDB_cursor_op op; - MDB_cursor m2; - int found_old = 0; - - /* If there are any loose pages, just use them */ - if (num == 1 && txn->mt_loose_pgs) { - np = txn->mt_loose_pgs; - txn->mt_loose_pgs = NEXT_LOOSE_PAGE(np); - txn->mt_loose_count--; - DPRINTF(("db %d use loose page %"Yu, DDBI(mc), np->mp_pgno)); - *mp = np; - return MDB_SUCCESS; - } - - *mp = NULL; - - /* If our dirty list is already full, we can't do anything */ - if (txn->mt_dirty_room == 0) { - rc = MDB_TXN_FULL; - goto fail; - } - - for (op = MDB_FIRST;; op = MDB_NEXT) { - MDB_val key, data; - MDB_node *leaf; - pgno_t *idl; - - /* Seek a big enough contiguous page range. Prefer - * pages at the tail, just truncating the list.
- */ - if (mop_len > n2) { - i = mop_len; - do { - pgno = mop[i]; - if (mop[i-n2] == pgno+n2) - goto search_done; - } while (--i > n2); - if (--retry < 0) - break; - } - - if (op == MDB_FIRST) { /* 1st iteration */ - /* Prepare to fetch more and coalesce */ - last = env->me_pglast; - oldest = env->me_pgoldest; - mdb_cursor_init(&m2, txn, FREE_DBI, NULL); -#if (MDB_DEVEL) & 2 /* "& 2" so MDB_DEVEL=1 won't hide bugs breaking freeDB */ - /* Use original snapshot. TODO: Should need less care in code - * which modifies the database. Maybe we can delete some code? - */ - m2.mc_flags |= C_ORIG_RDONLY; - m2.mc_db = &env->me_metas[(txn->mt_txnid-1) & 1]->mm_dbs[FREE_DBI]; - m2.mc_dbflag = (unsigned char *)""; /* probably unnecessary */ -#endif - if (last) { - op = MDB_SET_RANGE; - key.mv_data = &last; /* will look up last+1 */ - key.mv_size = sizeof(last); - } - if (Paranoid && mc->mc_dbi == FREE_DBI) - retry = -1; - } - if (Paranoid && retry < 0 && mop_len) - break; - - last++; - /* Do not fetch more if the record will be too recent */ - if (oldest <= last) { - if (!found_old) { - oldest = mdb_find_oldest(txn); - env->me_pgoldest = oldest; - found_old = 1; - } - if (oldest <= last) - break; - } - rc = mdb_cursor_get(&m2, &key, NULL, op); - if (rc) { - if (rc == MDB_NOTFOUND) - break; - goto fail; - } - last = *(txnid_t*)key.mv_data; - if (oldest <= last) { - if (!found_old) { - oldest = mdb_find_oldest(txn); - env->me_pgoldest = oldest; - found_old = 1; - } - if (oldest <= last) - break; - } - np = m2.mc_pg[m2.mc_top]; - leaf = NODEPTR(np, m2.mc_ki[m2.mc_top]); - if ((rc = mdb_node_read(&m2, leaf, &data)) != MDB_SUCCESS) - goto fail; - - idl = (MDB_ID *) data.mv_data; - i = idl[0]; - if (!mop) { - if (!(env->me_pghead = mop = mdb_midl_alloc(i))) { - rc = ENOMEM; - goto fail; - } - } else { - if ((rc = mdb_midl_need(&env->me_pghead, i)) != 0) - goto fail; - mop = env->me_pghead; - } - env->me_pglast = last; -#if (MDB_DEBUG) > 1 - DPRINTF(("IDL read txn %"Yu" root %"Yu" num %u", - last, txn->mt_dbs[FREE_DBI].md_root, i)); - for (j = i; j; j--) - DPRINTF(("IDL %"Yu, idl[j])); -#endif - /* Merge in descending sorted order */ - mdb_midl_xmerge(mop, idl); - mop_len = mop[0]; - } - - /* Use new pages from the map when nothing suitable in the freeDB */ - i = 0; - pgno = txn->mt_next_pgno; - if (pgno + num >= env->me_maxpg) { - DPUTS("DB size maxed out"); - rc = MDB_MAP_FULL; - goto fail; - } -#if defined(_WIN32) && !defined(MDB_VL32) - if (!(env->me_flags & MDB_RDONLY)) { - void *p; - p = (MDB_page *)(env->me_map + env->me_psize * pgno); - p = VirtualAlloc(p, env->me_psize * num, MEM_COMMIT, - (env->me_flags & MDB_WRITEMAP) ? PAGE_READWRITE: - PAGE_READONLY); - if (!p) { - DPUTS("VirtualAlloc failed"); - rc = ErrCode(); - goto fail; - } - } -#endif - -search_done: - if (env->me_flags & MDB_WRITEMAP) { - np = (MDB_page *)(env->me_map + env->me_psize * pgno); - } else { - if (!(np = mdb_page_malloc(txn, num))) { - rc = ENOMEM; - goto fail; - } - } - if (i) { - mop[0] = mop_len -= num; - /* Move any stragglers down */ - for (j = i-num; j < mop_len; ) - mop[++j] = mop[++i]; - } else { - txn->mt_next_pgno = pgno + num; - } - np->mp_pgno = pgno; - mdb_page_dirty(txn, np); - *mp = np; - - return MDB_SUCCESS; - -fail: - txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -/** Copy the used portions of a non-overflow page. 
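/* Illustrative aside, not part of the deleted file: mdb_midl_xmerge() above merges two ID lists that share one sort order. A generic sketch of that merge step for descending-sorted arrays; LMDB's real version works in place on its length-prefixed IDL representation. */
#include <stddef.h>

typedef unsigned long ID;	/* stand-in for MDB_ID */

static size_t merge_desc(const ID *a, size_t an, const ID *b, size_t bn,
	ID *dst)	/* dst must hold an + bn entries */
{
	size_t i = 0, j = 0, k = 0;
	while (i < an && j < bn)
		dst[k++] = (a[i] >= b[j]) ? a[i++] : b[j++];
	while (i < an) dst[k++] = a[i++];
	while (j < bn) dst[k++] = b[j++];
	return k;
}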
- * @param[in] dst page to copy into - * @param[in] src page to copy from - * @param[in] psize size of a page - */ -static void -mdb_page_copy(MDB_page *dst, MDB_page *src, unsigned int psize) -{ - enum { Align = sizeof(pgno_t) }; - indx_t upper = src->mp_upper, lower = src->mp_lower, unused = upper-lower; - - /* If page isn't full, just copy the used portion. Adjust - * alignment so memcpy may copy words instead of bytes. - */ - if ((unused &= -Align) && !IS_LEAF2(src)) { - upper = (upper + PAGEBASE) & -Align; - memcpy(dst, src, (lower + PAGEBASE + (Align-1)) & -Align); - memcpy((pgno_t *)((char *)dst+upper), (pgno_t *)((char *)src+upper), - psize - upper); - } else { - memcpy(dst, src, psize - unused); - } -} - -/** Pull a page off the txn's spill list, if present. - * If a page being referenced was spilled to disk in this txn, bring - * it back and make it dirty/writable again. - * @param[in] txn the transaction handle. - * @param[in] mp the page being referenced. It must not be dirty. - * @param[out] ret the writable page, if any. ret is unchanged if - * mp wasn't spilled. - */ -static int -mdb_page_unspill(MDB_txn *txn, MDB_page *mp, MDB_page **ret) -{ - MDB_env *env = txn->mt_env; - const MDB_txn *tx2; - unsigned x; - pgno_t pgno = mp->mp_pgno, pn = pgno << 1; - - for (tx2 = txn; tx2; tx2=tx2->mt_parent) { - if (!tx2->mt_spill_pgs) - continue; - x = mdb_midl_search(tx2->mt_spill_pgs, pn); - if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) { - MDB_page *np; - int num; - if (txn->mt_dirty_room == 0) - return MDB_TXN_FULL; - if (IS_OVERFLOW(mp)) - num = mp->mp_pages; - else - num = 1; - if (env->me_flags & MDB_WRITEMAP) { - np = mp; - } else { - np = mdb_page_malloc(txn, num); - if (!np) - return ENOMEM; - if (num > 1) - memcpy(np, mp, num * env->me_psize); - else - mdb_page_copy(np, mp, env->me_psize); - } - if (tx2 == txn) { - /* If in current txn, this page is no longer spilled. - * If it happens to be the last page, truncate the spill list. - * Otherwise mark it as deleted by setting the LSB. - */ - if (x == txn->mt_spill_pgs[0]) - txn->mt_spill_pgs[0]--; - else - txn->mt_spill_pgs[x] |= 1; - } /* otherwise, if belonging to a parent txn, the - * page remains spilled until child commits - */ - - mdb_page_dirty(txn, np); - np->mp_flags |= P_DIRTY; - *ret = np; - break; - } - } - return MDB_SUCCESS; -} - -/** Touch a page: make it dirty and re-insert into tree with updated pgno. - * Set #MDB_TXN_ERROR on failure. - * @param[in] mc cursor pointing to the page to be touched - * @return 0 on success, non-zero on failure. 
- */ -static int -mdb_page_touch(MDB_cursor *mc) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top], *np; - MDB_txn *txn = mc->mc_txn; - MDB_cursor *m2, *m3; - pgno_t pgno; - int rc; - - if (!F_ISSET(mp->mp_flags, P_DIRTY)) { - if (txn->mt_flags & MDB_TXN_SPILLS) { - np = NULL; - rc = mdb_page_unspill(txn, mp, &np); - if (rc) - goto fail; - if (np) - goto done; - } - if ((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) || - (rc = mdb_page_alloc(mc, 1, &np))) - goto fail; - pgno = np->mp_pgno; - DPRINTF(("touched db %d page %"Yu" -> %"Yu, DDBI(mc), - mp->mp_pgno, pgno)); - mdb_cassert(mc, mp->mp_pgno != pgno); - mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno); - /* Update the parent page, if any, to point to the new page */ - if (mc->mc_top) { - MDB_page *parent = mc->mc_pg[mc->mc_top-1]; - MDB_node *node = NODEPTR(parent, mc->mc_ki[mc->mc_top-1]); - SETPGNO(node, pgno); - } else { - mc->mc_db->md_root = pgno; - } - } else if (txn->mt_parent && !IS_SUBP(mp)) { - MDB_ID2 mid, *dl = txn->mt_u.dirty_list; - pgno = mp->mp_pgno; - /* If txn has a parent, make sure the page is in our - * dirty list. - */ - if (dl[0].mid) { - unsigned x = mdb_mid2l_search(dl, pgno); - if (x <= dl[0].mid && dl[x].mid == pgno) { - if (mp != dl[x].mptr) { /* bad cursor? */ - mc->mc_flags &= ~(C_INITIALIZED|C_EOF); - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PROBLEM; - } - return 0; - } - } - mdb_cassert(mc, dl[0].mid < MDB_IDL_UM_MAX); - /* No - copy it */ - np = mdb_page_malloc(txn, 1); - if (!np) - return ENOMEM; - mid.mid = pgno; - mid.mptr = np; - rc = mdb_mid2l_insert(dl, &mid); - mdb_cassert(mc, rc == 0); - } else { - return 0; - } - - mdb_page_copy(np, mp, txn->mt_env->me_psize); - np->mp_pgno = pgno; - np->mp_flags |= P_DIRTY; - -done: - /* Adjust cursors pointing to mp */ - mc->mc_pg[mc->mc_top] = np; - m2 = txn->mt_cursors[mc->mc_dbi]; - if (mc->mc_flags & C_SUB) { - for (; m2; m2=m2->mc_next) { - m3 = &m2->mc_xcursor->mx_cursor; - if (m3->mc_snum < mc->mc_snum) continue; - if (m3->mc_pg[mc->mc_top] == mp) - m3->mc_pg[mc->mc_top] = np; - } - } else { - for (; m2; m2=m2->mc_next) { - if (m2->mc_snum < mc->mc_snum) continue; - if (m2 == mc) continue; - if (m2->mc_pg[mc->mc_top] == mp) { - m2->mc_pg[mc->mc_top] = np; - if (IS_LEAF(np)) - XCURSOR_REFRESH(m2, mc->mc_top, np); - } - } - } - MDB_PAGE_UNREF(mc->mc_txn, mp); - return 0; - -fail: - txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_env_sync0(MDB_env *env, int force, pgno_t numpgs) -{ - int rc = 0; - if (env->me_flags & MDB_RDONLY) - return EACCES; - if (force || !F_ISSET(env->me_flags, MDB_NOSYNC)) { - if (env->me_flags & MDB_WRITEMAP) { - int flags = ((env->me_flags & MDB_MAPASYNC) && !force) - ? 
MS_ASYNC : MS_SYNC; - if (MDB_MSYNC(env->me_map, env->me_psize * numpgs, flags)) - rc = ErrCode(); -#ifdef _WIN32 - else if (flags == MS_SYNC && MDB_FDATASYNC(env->me_fd)) - rc = ErrCode(); -#endif - } else { -#ifdef BROKEN_FDATASYNC - if (env->me_flags & MDB_FSYNCONLY) { - if (fsync(env->me_fd)) - rc = ErrCode(); - } else -#endif - if (MDB_FDATASYNC(env->me_fd)) - rc = ErrCode(); - } - } - return rc; -} - -int -mdb_env_sync(MDB_env *env, int force) -{ - MDB_meta *m = mdb_env_pick_meta(env); - return mdb_env_sync0(env, force, m->mm_last_pg+1); -} - -/** Back up parent txn's cursors, then grab the originals for tracking */ -static int -mdb_cursor_shadow(MDB_txn *src, MDB_txn *dst) -{ - MDB_cursor *mc, *bk; - MDB_xcursor *mx; - size_t size; - int i; - - for (i = src->mt_numdbs; --i >= 0; ) { - if ((mc = src->mt_cursors[i]) != NULL) { - size = sizeof(MDB_cursor); - if (mc->mc_xcursor) - size += sizeof(MDB_xcursor); - for (; mc; mc = bk->mc_next) { - bk = malloc(size); - if (!bk) - return ENOMEM; - *bk = *mc; - mc->mc_backup = bk; - mc->mc_db = &dst->mt_dbs[i]; - /* Kill pointers into src to reduce abuse: The - * user may not use mc until dst ends. But we need a valid - * txn pointer here for cursor fixups to keep working. - */ - mc->mc_txn = dst; - mc->mc_dbflag = &dst->mt_dbflags[i]; - if ((mx = mc->mc_xcursor) != NULL) { - *(MDB_xcursor *)(bk+1) = *mx; - mx->mx_cursor.mc_txn = dst; - } - mc->mc_next = dst->mt_cursors[i]; - dst->mt_cursors[i] = mc; - } - } - } - return MDB_SUCCESS; -} - -/** Close this write txn's cursors, give parent txn's cursors back to parent. - * @param[in] txn the transaction handle. - * @param[in] merge true to keep changes to parent cursors, false to revert. - * @return 0 on success, non-zero on failure. - */ -static void -mdb_cursors_close(MDB_txn *txn, unsigned merge) -{ - MDB_cursor **cursors = txn->mt_cursors, *mc, *next, *bk; - MDB_xcursor *mx; - int i; - - for (i = txn->mt_numdbs; --i >= 0; ) { - for (mc = cursors[i]; mc; mc = next) { - next = mc->mc_next; - if ((bk = mc->mc_backup) != NULL) { - if (merge) { - /* Commit changes to parent txn */ - mc->mc_next = bk->mc_next; - mc->mc_backup = bk->mc_backup; - mc->mc_txn = bk->mc_txn; - mc->mc_db = bk->mc_db; - mc->mc_dbflag = bk->mc_dbflag; - if ((mx = mc->mc_xcursor) != NULL) - mx->mx_cursor.mc_txn = bk->mc_txn; - } else { - /* Abort nested txn */ - *mc = *bk; - if ((mx = mc->mc_xcursor) != NULL) - *mx = *(MDB_xcursor *)(bk+1); - } - mc = bk; - } - /* Only malloced cursors are permanently tracked. */ - free(mc); - } - cursors[i] = NULL; - } -} - -#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */ -enum Pidlock_op { - Pidset, Pidcheck -}; -#else -enum Pidlock_op { - Pidset = F_SETLK, Pidcheck = F_GETLK -}; -#endif - -/** Set or check a pid lock. Set returns 0 on success. - * Check returns 0 if the process is certainly dead, nonzero if it may - * be alive (the lock exists or an error happened so we do not know). - * - * On Windows Pidset is a no-op, we merely check for the existence - * of the process with the given pid. On POSIX we use a single byte - * lock on the lockfile, set at an offset equal to the pid. 
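/* Illustrative aside, not part of the deleted file: typical use of the sync entry points above is to open the environment with MDB_NOSYNC for fast commits and force durability at caller-chosen checkpoints. Error checks elided; open_fast() and checkpoint() are hypothetical helpers. */
#include <lmdb.h>

static int open_fast(MDB_env **env, const char *path)
{
	int rc = mdb_env_create(env);
	if (rc == 0)		/* commits skip fdatasync until we ask */
		rc = mdb_env_open(*env, path, MDB_NOSYNC, 0664);
	return rc;
}

static void checkpoint(MDB_env *env)
{
	(void)mdb_env_sync(env, 1);	/* force=1: flush despite MDB_NOSYNC */
}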
- */ -static int -mdb_reader_pid(MDB_env *env, enum Pidlock_op op, MDB_PID_T pid) -{ -#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */ - int ret = 0; - HANDLE h; - if (op == Pidcheck) { - h = OpenProcess(env->me_pidquery, FALSE, pid); - /* No documented "no such process" code, but other program use this: */ - if (!h) - return ErrCode() != ERROR_INVALID_PARAMETER; - /* A process exists until all handles to it close. Has it exited? */ - ret = WaitForSingleObject(h, 0) != 0; - CloseHandle(h); - } - return ret; -#else - for (;;) { - int rc; - struct flock lock_info; - memset(&lock_info, 0, sizeof(lock_info)); - lock_info.l_type = F_WRLCK; - lock_info.l_whence = SEEK_SET; - lock_info.l_start = pid; - lock_info.l_len = 1; - if ((rc = fcntl(env->me_lfd, op, &lock_info)) == 0) { - if (op == F_GETLK && lock_info.l_type != F_UNLCK) - rc = -1; - } else if ((rc = ErrCode()) == EINTR) { - continue; - } - return rc; - } -#endif -} - -/** Common code for #mdb_txn_begin() and #mdb_txn_renew(). - * @param[in] txn the transaction handle to initialize - * @return 0 on success, non-zero on failure. - */ -static int -mdb_txn_renew0(MDB_txn *txn) -{ - MDB_env *env = txn->mt_env; - MDB_txninfo *ti = env->me_txns; - MDB_meta *meta; - unsigned int i, nr, flags = txn->mt_flags; - uint16_t x; - int rc, new_notls = 0; - - if ((flags &= MDB_TXN_RDONLY) != 0) { - if (!ti) { - meta = mdb_env_pick_meta(env); - txn->mt_txnid = meta->mm_txnid; - txn->mt_u.reader = NULL; - } else { - MDB_reader *r = (env->me_flags & MDB_NOTLS) ? txn->mt_u.reader : - pthread_getspecific(env->me_txkey); - if (r) { - if (r->mr_pid != env->me_pid || r->mr_txnid != (txnid_t)-1) - return MDB_BAD_RSLOT; - } else { - MDB_PID_T pid = env->me_pid; - MDB_THR_T tid = pthread_self(); - mdb_mutexref_t rmutex = env->me_rmutex; - - if (!env->me_live_reader) { - rc = mdb_reader_pid(env, Pidset, pid); - if (rc) - return rc; - env->me_live_reader = 1; - } - - if (LOCK_MUTEX(rc, env, rmutex)) - return rc; - nr = ti->mti_numreaders; - for (i=0; i<nr; i++) - if (ti->mti_readers[i].mr_pid == 0) - break; - if (i == env->me_maxreaders) { - UNLOCK_MUTEX(rmutex); - return MDB_READERS_FULL; - } - r = &ti->mti_readers[i]; - /* Claim the reader slot, carefully since other code - * uses the reader table un-mutexed: First reset the - * slot, next publish it in mti_numreaders. After - * that, it is safe for mdb_env_close() to touch it. - * When it will be closed, we can finally claim it. - */ - r->mr_pid = 0; - r->mr_txnid = (txnid_t)-1; - r->mr_tid = tid; - if (i == nr) - ti->mti_numreaders = ++nr; - env->me_close_readers = nr; - r->mr_pid = pid; - UNLOCK_MUTEX(rmutex); - - new_notls = (env->me_flags & MDB_NOTLS); - if (!new_notls && (rc=pthread_setspecific(env->me_txkey, r))) { - r->mr_pid = 0; - return rc; - } - } - do /* LY: Retry on a race, ITS#7970.
*/ - r->mr_txnid = ti->mti_txnid; - while(r->mr_txnid != ti->mti_txnid); - txn->mt_txnid = r->mr_txnid; - txn->mt_u.reader = r; - meta = env->me_metas[txn->mt_txnid & 1]; - } - - } else { - /* Not yet touching txn == env->me_txn0, it may be active */ - if (ti) { - if (LOCK_MUTEX(rc, env, env->me_wmutex)) - return rc; - txn->mt_txnid = ti->mti_txnid; - meta = env->me_metas[txn->mt_txnid & 1]; - } else { - meta = mdb_env_pick_meta(env); - txn->mt_txnid = meta->mm_txnid; - } - txn->mt_txnid++; -#if MDB_DEBUG - if (txn->mt_txnid == mdb_debug_start) - mdb_debug = 1; -#endif - txn->mt_child = NULL; - txn->mt_loose_pgs = NULL; - txn->mt_loose_count = 0; - txn->mt_dirty_room = MDB_IDL_UM_MAX; - txn->mt_u.dirty_list = env->me_dirty_list; - txn->mt_u.dirty_list[0].mid = 0; - txn->mt_free_pgs = env->me_free_pgs; - txn->mt_free_pgs[0] = 0; - txn->mt_spill_pgs = NULL; - env->me_txn = txn; - memcpy(txn->mt_dbiseqs, env->me_dbiseqs, env->me_maxdbs * sizeof(unsigned int)); - } - - /* Copy the DB info and flags */ - memcpy(txn->mt_dbs, meta->mm_dbs, CORE_DBS * sizeof(MDB_db)); - - /* Moved to here to avoid a data race in read TXNs */ - txn->mt_next_pgno = meta->mm_last_pg+1; -#ifdef MDB_VL32 - txn->mt_last_pgno = txn->mt_next_pgno - 1; -#endif - - txn->mt_flags = flags; - - /* Setup db info */ - txn->mt_numdbs = env->me_numdbs; - for (i=CORE_DBS; i<txn->mt_numdbs; i++) { - x = env->me_dbflags[i]; - txn->mt_dbs[i].md_flags = x & PERSISTENT_FLAGS; - txn->mt_dbflags[i] = (x & MDB_VALID) ? DB_VALID|DB_USRVALID|DB_STALE : 0; - } - txn->mt_dbflags[MAIN_DBI] = DB_VALID|DB_USRVALID; - txn->mt_dbflags[FREE_DBI] = DB_VALID; - - if (env->me_flags & MDB_FATAL_ERROR) { - DPUTS("environment had fatal error, must shutdown!"); - rc = MDB_PANIC; - } else if (env->me_maxpg < txn->mt_next_pgno) { - rc = MDB_MAP_RESIZED; - } else { - return MDB_SUCCESS; - } - mdb_txn_end(txn, new_notls /*0 or MDB_END_SLOT*/ | MDB_END_FAIL_BEGIN); - return rc; -} - -int -mdb_txn_renew(MDB_txn *txn) -{ - int rc; - - if (!txn || !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY|MDB_TXN_FINISHED)) - return EINVAL; - - rc = mdb_txn_renew0(txn); - if (rc == MDB_SUCCESS) { - DPRINTF(("renew txn %"Yu"%c %p on mdbenv %p, root page %"Yu, - txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w', - (void *)txn, (void *)txn->mt_env, txn->mt_dbs[MAIN_DBI].md_root)); - } - return rc; -} - -int -mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **ret) -{ - MDB_txn *txn; - MDB_ntxn *ntxn; - int rc, size, tsize; - - flags &= MDB_TXN_BEGIN_FLAGS; - flags |= env->me_flags & MDB_WRITEMAP; - - if (env->me_flags & MDB_RDONLY & ~flags) /* write txn in RDONLY env */ - return EACCES; - - if (parent) { - /* Nested transactions: Max 1 child, write txns only, no writemap */ - flags |= parent->mt_flags; - if (flags & (MDB_RDONLY|MDB_WRITEMAP|MDB_TXN_BLOCKED)) { - return (parent->mt_flags & MDB_TXN_RDONLY) ? EINVAL : MDB_BAD_TXN; - } - /* Child txns save MDB_pgstate and use own copy of cursors */ - size = env->me_maxdbs * (sizeof(MDB_db)+sizeof(MDB_cursor *)+1); - size += tsize = sizeof(MDB_ntxn); - } else if (flags & MDB_RDONLY) { - size = env->me_maxdbs * (sizeof(MDB_db)+1); - size += tsize = sizeof(MDB_txn); - } else { - /* Reuse preallocated write txn. However, do not touch it until - * mdb_txn_renew0() succeeds, since it currently may be active.
- */ - txn = env->me_txn0; - goto renew; - } - if ((txn = calloc(1, size)) == NULL) { - DPRINTF(("calloc: %s", strerror(errno))); - return ENOMEM; - } -#ifdef MDB_VL32 - if (!parent) { - txn->mt_rpages = malloc(MDB_TRPAGE_SIZE * sizeof(MDB_ID3)); - if (!txn->mt_rpages) { - free(txn); - return ENOMEM; - } - txn->mt_rpages[0].mid = 0; - txn->mt_rpcheck = MDB_TRPAGE_SIZE/2; - } -#endif - txn->mt_dbxs = env->me_dbxs; /* static */ - txn->mt_dbs = (MDB_db *) ((char *)txn + tsize); - txn->mt_dbflags = (unsigned char *)txn + size - env->me_maxdbs; - txn->mt_flags = flags; - txn->mt_env = env; - - if (parent) { - unsigned int i; - txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs); - txn->mt_dbiseqs = parent->mt_dbiseqs; - txn->mt_u.dirty_list = malloc(sizeof(MDB_ID2)*MDB_IDL_UM_SIZE); - if (!txn->mt_u.dirty_list || - !(txn->mt_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX))) - { - free(txn->mt_u.dirty_list); - free(txn); - return ENOMEM; - } - txn->mt_txnid = parent->mt_txnid; - txn->mt_dirty_room = parent->mt_dirty_room; - txn->mt_u.dirty_list[0].mid = 0; - txn->mt_spill_pgs = NULL; - txn->mt_next_pgno = parent->mt_next_pgno; - parent->mt_flags |= MDB_TXN_HAS_CHILD; - parent->mt_child = txn; - txn->mt_parent = parent; - txn->mt_numdbs = parent->mt_numdbs; -#ifdef MDB_VL32 - txn->mt_rpages = parent->mt_rpages; -#endif - memcpy(txn->mt_dbs, parent->mt_dbs, txn->mt_numdbs * sizeof(MDB_db)); - /* Copy parent's mt_dbflags, but clear DB_NEW */ - for (i=0; i<txn->mt_numdbs; i++) - txn->mt_dbflags[i] = parent->mt_dbflags[i] & ~DB_NEW; - rc = 0; - ntxn = (MDB_ntxn *)txn; - ntxn->mnt_pgstate = env->me_pgstate; /* save parent me_pghead & co */ - if (env->me_pghead) { - size = MDB_IDL_SIZEOF(env->me_pghead); - env->me_pghead = mdb_midl_alloc(env->me_pghead[0]); - if (env->me_pghead) - memcpy(env->me_pghead, ntxn->mnt_pgstate.mf_pghead, size); - else - rc = ENOMEM; - } - if (!rc) - rc = mdb_cursor_shadow(parent, txn); - if (rc) - mdb_txn_end(txn, MDB_END_FAIL_BEGINCHILD); - } else { /* MDB_RDONLY */ - txn->mt_dbiseqs = env->me_dbiseqs; -renew: - rc = mdb_txn_renew0(txn); - } - if (rc) { - if (txn != env->me_txn0) { -#ifdef MDB_VL32 - free(txn->mt_rpages); -#endif - free(txn); - } - } else { - txn->mt_flags |= flags; /* could not change txn=me_txn0 earlier */ - *ret = txn; - DPRINTF(("begin txn %"Yu"%c %p on mdbenv %p, root page %"Yu, - txn->mt_txnid, (flags & MDB_RDONLY) ? 'r' : 'w', - (void *) txn, (void *) env, txn->mt_dbs[MAIN_DBI].md_root)); - } - - return rc; -} - -MDB_env * -mdb_txn_env(MDB_txn *txn) -{ - if(!txn) return NULL; - return txn->mt_env; -} - -mdb_size_t -mdb_txn_id(MDB_txn *txn) -{ - if(!txn) return 0; - return txn->mt_txnid; -} - -/** Export or close DBI handles opened in this txn. */ -static void -mdb_dbis_update(MDB_txn *txn, int keep) -{ - int i; - MDB_dbi n = txn->mt_numdbs; - MDB_env *env = txn->mt_env; - unsigned char *tdbflags = txn->mt_dbflags; - - for (i = n; --i >= CORE_DBS;) { - if (tdbflags[i] & DB_NEW) { - if (keep) { - env->me_dbflags[i] = txn->mt_dbs[i].md_flags | MDB_VALID; - } else { - char *ptr = env->me_dbxs[i].md_name.mv_data; - if (ptr) { - env->me_dbxs[i].md_name.mv_data = NULL; - env->me_dbxs[i].md_name.mv_size = 0; - env->me_dbflags[i] = 0; - env->me_dbiseqs[i]++; - free(ptr); - } - } - } - } - if (keep && env->me_numdbs < n) - env->me_numdbs = n; -} - -/** End a transaction, except successful commit of a nested transaction. - * May be called twice for readonly txns: First reset it, then abort.
- * @param[in] txn the transaction handle to end - * @param[in] mode why and how to end the transaction - */ -static void -mdb_txn_end(MDB_txn *txn, unsigned mode) -{ - MDB_env *env = txn->mt_env; -#if MDB_DEBUG - static const char *const names[] = MDB_END_NAMES; -#endif - - /* Export or close DBI handles opened in this txn */ - mdb_dbis_update(txn, mode & MDB_END_UPDATE); - - DPRINTF(("%s txn %"Yu"%c %p on mdbenv %p, root page %"Yu, - names[mode & MDB_END_OPMASK], - txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w', - (void *) txn, (void *)env, txn->mt_dbs[MAIN_DBI].md_root)); - - if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) { - if (txn->mt_u.reader) { - txn->mt_u.reader->mr_txnid = (txnid_t)-1; - if (!(env->me_flags & MDB_NOTLS)) { - txn->mt_u.reader = NULL; /* txn does not own reader */ - } else if (mode & MDB_END_SLOT) { - txn->mt_u.reader->mr_pid = 0; - txn->mt_u.reader = NULL; - } /* else txn owns the slot until it does MDB_END_SLOT */ - } - txn->mt_numdbs = 0; /* prevent further DBI activity */ - txn->mt_flags |= MDB_TXN_FINISHED; - - } else if (!F_ISSET(txn->mt_flags, MDB_TXN_FINISHED)) { - pgno_t *pghead = env->me_pghead; - - if (!(mode & MDB_END_UPDATE)) /* !(already closed cursors) */ - mdb_cursors_close(txn, 0); - if (!(env->me_flags & MDB_WRITEMAP)) { - mdb_dlist_free(txn); - } - - txn->mt_numdbs = 0; - txn->mt_flags = MDB_TXN_FINISHED; - - if (!txn->mt_parent) { - mdb_midl_shrink(&txn->mt_free_pgs); - env->me_free_pgs = txn->mt_free_pgs; - /* me_pgstate: */ - env->me_pghead = NULL; - env->me_pglast = 0; - - env->me_txn = NULL; - mode = 0; /* txn == env->me_txn0, do not free() it */ - - /* The writer mutex was locked in mdb_txn_begin. */ - if (env->me_txns) - UNLOCK_MUTEX(env->me_wmutex); - } else { - txn->mt_parent->mt_child = NULL; - txn->mt_parent->mt_flags &= ~MDB_TXN_HAS_CHILD; - env->me_pgstate = ((MDB_ntxn *)txn)->mnt_pgstate; - mdb_midl_free(txn->mt_free_pgs); - mdb_midl_free(txn->mt_spill_pgs); - free(txn->mt_u.dirty_list); - } - - mdb_midl_free(pghead); - } -#ifdef MDB_VL32 - if (!txn->mt_parent) { - MDB_ID3L el = env->me_rpages, tl = txn->mt_rpages; - unsigned i, x, n = tl[0].mid; - pthread_mutex_lock(&env->me_rpmutex); - for (i = 1; i <= n; i++) { - if (tl[i].mid & (MDB_RPAGE_CHUNK-1)) { - /* tmp overflow pages that we didn't share in env */ - munmap(tl[i].mptr, tl[i].mcnt * env->me_psize); - } else { - x = mdb_mid3l_search(el, tl[i].mid); - if (tl[i].mptr == el[x].mptr) { - el[x].mref--; - } else { - /* another tmp overflow page */ - munmap(tl[i].mptr, tl[i].mcnt * env->me_psize); - } - } - } - pthread_mutex_unlock(&env->me_rpmutex); - tl[0].mid = 0; - if (mode & MDB_END_FREE) - free(tl); - } -#endif - if (mode & MDB_END_FREE) - free(txn); -} - -void -mdb_txn_reset(MDB_txn *txn) -{ - if (txn == NULL) - return; - - /* This call is only valid for read-only txns */ - if (!(txn->mt_flags & MDB_TXN_RDONLY)) - return; - - mdb_txn_end(txn, MDB_END_RESET); -} - -void -mdb_txn_abort(MDB_txn *txn) -{ - if (txn == NULL) - return; - - if (txn->mt_child) - mdb_txn_abort(txn->mt_child); - - mdb_txn_end(txn, MDB_END_ABORT|MDB_END_SLOT|MDB_END_FREE); -} - -/** Save the freelist as of this transaction to the freeDB. - * This changes the freelist. Keep trying until it stabilizes. - * - * When (MDB_DEVEL) & 2, the changes do not affect #mdb_page_alloc(), - * it then uses the transaction's original snapshot of the freeDB. - */ -static int -mdb_freelist_save(MDB_txn *txn) -{ - /* env->me_pghead[] can grow and shrink during this call. 
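/* Illustrative aside, not part of the deleted file: the reset path above enables the documented reset/renew protocol for long-lived readers. A sketch with the public LMDB API; poll_loop() is a hypothetical helper and error handling is elided. */
#include <lmdb.h>

static void poll_loop(MDB_env *env, MDB_dbi dbi, MDB_val *key)
{
	MDB_txn *txn;
	MDB_val data;
	int i;

	mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
	for (i = 0; i < 10; i++) {
		(void)mdb_get(txn, dbi, key, &data);
		mdb_txn_reset(txn);	/* drop the snapshot, keep the reader slot */
		/* ... wait for writers to commit ... */
		(void)mdb_txn_renew(txn);	/* pin the newest snapshot */
	}
	mdb_txn_abort(txn);	/* finally release the slot */
}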
- * env->me_pglast and txn->mt_free_pgs[] can only grow. - * Page numbers cannot disappear from txn->mt_free_pgs[]. - */ - MDB_cursor mc; - MDB_env *env = txn->mt_env; - int rc, maxfree_1pg = env->me_maxfree_1pg, more = 1; - txnid_t pglast = 0, head_id = 0; - pgno_t freecnt = 0, *free_pgs, *mop; - ssize_t head_room = 0, total_room = 0, mop_len, clean_limit; - - mdb_cursor_init(&mc, txn, FREE_DBI, NULL); - - if (env->me_pghead) { - /* Make sure first page of freeDB is touched and on freelist */ - rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST|MDB_PS_MODIFY); - if (rc && rc != MDB_NOTFOUND) - return rc; - } - - if (!env->me_pghead && txn->mt_loose_pgs) { - /* Put loose page numbers in mt_free_pgs, since - * we may be unable to return them to me_pghead. - */ - MDB_page *mp = txn->mt_loose_pgs; - if ((rc = mdb_midl_need(&txn->mt_free_pgs, txn->mt_loose_count)) != 0) - return rc; - for (; mp; mp = NEXT_LOOSE_PAGE(mp)) - mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno); - txn->mt_loose_pgs = NULL; - txn->mt_loose_count = 0; - } - - /* MDB_RESERVE cancels meminit in ovpage malloc (when no WRITEMAP) */ - clean_limit = (env->me_flags & (MDB_NOMEMINIT|MDB_WRITEMAP)) - ? SSIZE_MAX : maxfree_1pg; - - for (;;) { - /* Come back here after each Put() in case freelist changed */ - MDB_val key, data; - pgno_t *pgs; - ssize_t j; - - /* If using records from freeDB which we have not yet - * deleted, delete them and any we reserved for me_pghead. - */ - while (pglast < env->me_pglast) { - rc = mdb_cursor_first(&mc, &key, NULL); - if (rc) - return rc; - pglast = head_id = *(txnid_t *)key.mv_data; - total_room = head_room = 0; - mdb_tassert(txn, pglast <= env->me_pglast); - rc = mdb_cursor_del(&mc, 0); - if (rc) - return rc; - } - - /* Save the IDL of pages freed by this txn, to a single record */ - if (freecnt < txn->mt_free_pgs[0]) { - if (!freecnt) { - /* Make sure last page of freeDB is touched and on freelist */ - rc = mdb_page_search(&mc, NULL, MDB_PS_LAST|MDB_PS_MODIFY); - if (rc && rc != MDB_NOTFOUND) - return rc; - } - free_pgs = txn->mt_free_pgs; - /* Write to last page of freeDB */ - key.mv_size = sizeof(txn->mt_txnid); - key.mv_data = &txn->mt_txnid; - do { - freecnt = free_pgs[0]; - data.mv_size = MDB_IDL_SIZEOF(free_pgs); - rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE); - if (rc) - return rc; - /* Retry if mt_free_pgs[] grew during the Put() */ - free_pgs = txn->mt_free_pgs; - } while (freecnt < free_pgs[0]); - mdb_midl_sort(free_pgs); - memcpy(data.mv_data, free_pgs, data.mv_size); -#if (MDB_DEBUG) > 1 - { - unsigned int i = free_pgs[0]; - DPRINTF(("IDL write txn %"Yu" root %"Yu" num %u", - txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i)); - for (; i; i--) - DPRINTF(("IDL %"Yu, free_pgs[i])); - } -#endif - continue; - } - - mop = env->me_pghead; - mop_len = (mop ? mop[0] : 0) + txn->mt_loose_count; - - /* Reserve records for me_pghead[]. Split it if multi-page, - * to avoid searching freeDB for a page range. Use keys in - * range [1,me_pglast]: Smaller than txnid of oldest reader. 
- */ - if (total_room >= mop_len) { - if (total_room == mop_len || --more < 0) - break; - } else if (head_room >= maxfree_1pg && head_id > 1) { - /* Keep current record (overflow page), add a new one */ - head_id--; - head_room = 0; - } - /* (Re)write {key = head_id, IDL length = head_room} */ - total_room -= head_room; - head_room = mop_len - total_room; - if (head_room > maxfree_1pg && head_id > 1) { - /* Overflow multi-page for part of me_pghead */ - head_room /= head_id; /* amortize page sizes */ - head_room += maxfree_1pg - head_room % (maxfree_1pg + 1); - } else if (head_room < 0) { - /* Rare case, not bothering to delete this record */ - head_room = 0; - } - key.mv_size = sizeof(head_id); - key.mv_data = &head_id; - data.mv_size = (head_room + 1) * sizeof(pgno_t); - rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE); - if (rc) - return rc; - /* IDL is initially empty, zero out at least the length */ - pgs = (pgno_t *)data.mv_data; - j = head_room > clean_limit ? head_room : 0; - do { - pgs[j] = 0; - } while (--j >= 0); - total_room += head_room; - } - - /* Return loose page numbers to me_pghead, though usually none are - * left at this point. The pages themselves remain in dirty_list. - */ - if (txn->mt_loose_pgs) { - MDB_page *mp = txn->mt_loose_pgs; - unsigned count = txn->mt_loose_count; - MDB_IDL loose; - /* Room for loose pages + temp IDL with same */ - if ((rc = mdb_midl_need(&env->me_pghead, 2*count+1)) != 0) - return rc; - mop = env->me_pghead; - loose = mop + MDB_IDL_ALLOCLEN(mop) - count; - for (count = 0; mp; mp = NEXT_LOOSE_PAGE(mp)) - loose[ ++count ] = mp->mp_pgno; - loose[0] = count; - mdb_midl_sort(loose); - mdb_midl_xmerge(mop, loose); - txn->mt_loose_pgs = NULL; - txn->mt_loose_count = 0; - mop_len = mop[0]; - } - - /* Fill in the reserved me_pghead records */ - rc = MDB_SUCCESS; - if (mop_len) { - MDB_val key, data; - - mop += mop_len; - rc = mdb_cursor_first(&mc, &key, &data); - for (; !rc; rc = mdb_cursor_next(&mc, &key, &data, MDB_NEXT)) { - txnid_t id = *(txnid_t *)key.mv_data; - ssize_t len = (ssize_t)(data.mv_size / sizeof(MDB_ID)) - 1; - MDB_ID save; - - mdb_tassert(txn, len >= 0 && id <= env->me_pglast); - key.mv_data = &id; - if (len > mop_len) { - len = mop_len; - data.mv_size = (len + 1) * sizeof(MDB_ID); - } - data.mv_data = mop -= len; - save = mop[0]; - mop[0] = len; - rc = mdb_cursor_put(&mc, &key, &data, MDB_CURRENT); - mop[0] = save; - if (rc || !(mop_len -= len)) - break; - } - } - return rc; -} - -/** Flush (some) dirty pages to the map, after clearing their dirty flag. - * @param[in] txn the transaction that's being committed - * @param[in] keep number of initial pages in dirty_list to keep dirty. - * @return 0 on success, non-zero on failure. 
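/* Illustrative aside, not part of the deleted file: the reservation trick above is available to applications as MDB_RESERVE, which returns a pointer into the page for the caller to fill afterwards. put_reserved() is a hypothetical helper; error checks elided. */
#include <string.h>
#include <lmdb.h>

static int put_reserved(MDB_txn *txn, MDB_dbi dbi, MDB_val *key,
	const void *payload, size_t len)
{
	MDB_val data;
	int rc;

	data.mv_size = len;	/* only the size is known up front */
	rc = mdb_put(txn, dbi, key, &data, MDB_RESERVE);
	if (rc == 0)	/* fill the space the put reserved */
		memcpy(data.mv_data, payload, len);
	return rc;
}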
- */ -static int -mdb_page_flush(MDB_txn *txn, int keep) -{ - MDB_env *env = txn->mt_env; - MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned psize = env->me_psize, j; - int i, pagecount = dl[0].mid, rc; - size_t size = 0; - off_t pos = 0; - pgno_t pgno = 0; - MDB_page *dp = NULL; -#ifdef _WIN32 - OVERLAPPED ov; -#else - struct iovec iov[MDB_COMMIT_PAGES]; - ssize_t wsize = 0, wres; - off_t wpos = 0, next_pos = 1; /* impossible pos, so pos != next_pos */ - int n = 0; -#endif - - j = i = keep; - - if (env->me_flags & MDB_WRITEMAP) { - /* Clear dirty flags */ - while (++i <= pagecount) { - dp = dl[i].mptr; - /* Don't flush this page yet */ - if (dp->mp_flags & (P_LOOSE|P_KEEP)) { - dp->mp_flags &= ~P_KEEP; - dl[++j] = dl[i]; - continue; - } - dp->mp_flags &= ~P_DIRTY; - } - goto done; - } - - /* Write the pages */ - for (;;) { - if (++i <= pagecount) { - dp = dl[i].mptr; - /* Don't flush this page yet */ - if (dp->mp_flags & (P_LOOSE|P_KEEP)) { - dp->mp_flags &= ~P_KEEP; - dl[i].mid = 0; - continue; - } - pgno = dl[i].mid; - /* clear dirty flag */ - dp->mp_flags &= ~P_DIRTY; - pos = pgno * psize; - size = psize; - if (IS_OVERFLOW(dp)) size *= dp->mp_pages; - } -#ifdef _WIN32 - else break; - - /* Windows actually supports scatter/gather I/O, but only on - * unbuffered file handles. Since we're relying on the OS page - * cache for all our data, that's self-defeating. So we just - * write pages one at a time. We use the ov structure to set - * the write offset, to at least save the overhead of a Seek - * system call. - */ - DPRINTF(("committing page %"Yu, pgno)); - memset(&ov, 0, sizeof(ov)); - ov.Offset = pos & 0xffffffff; - ov.OffsetHigh = pos >> 16 >> 16; - if (!WriteFile(env->me_fd, dp, size, NULL, &ov)) { - rc = ErrCode(); - DPRINTF(("WriteFile: %d", rc)); - return rc; - } -#else - /* Write up to MDB_COMMIT_PAGES dirty pages at a time. */ - if (pos!=next_pos || n==MDB_COMMIT_PAGES || wsize+size>MAX_WRITE) { - if (n) { -retry_write: - /* Write previous page(s) */ -#ifdef MDB_USE_PWRITEV - wres = pwritev(env->me_fd, iov, n, wpos); -#else - if (n == 1) { - wres = pwrite(env->me_fd, iov[0].iov_base, wsize, wpos); - } else { -retry_seek: - if (lseek(env->me_fd, wpos, SEEK_SET) == -1) { - rc = ErrCode(); - if (rc == EINTR) - goto retry_seek; - DPRINTF(("lseek: %s", strerror(rc))); - return rc; - } - wres = writev(env->me_fd, iov, n); - } -#endif - if (wres != wsize) { - if (wres < 0) { - rc = ErrCode(); - if (rc == EINTR) - goto retry_write; - DPRINTF(("Write error: %s", strerror(rc))); - } else { - rc = EIO; /* TODO: Use which error code? */ - DPUTS("short write, filesystem full?"); - } - return rc; - } - n = 0; - } - if (i > pagecount) - break; - wpos = pos; - wsize = 0; - } - DPRINTF(("committing page %"Yu, pgno)); - next_pos = pos + size; - iov[n].iov_len = size; - iov[n].iov_base = (char *)dp; - wsize += size; - n++; -#endif /* _WIN32 */ - } -#ifdef MDB_VL32 - if (pgno > txn->mt_last_pgno) - txn->mt_last_pgno = pgno; -#endif - - /* MIPS has cache coherency issues, this is a no-op everywhere else - * Note: for any size >= on-chip cache size, entire on-chip cache is - * flushed. 
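On the POSIX side, the loop above coalesces runs of contiguous dirty pages into an iovec array and flushes a batch whenever the run breaks, MDB_COMMIT_PAGES entries accumulate, or MAX_WRITE would be exceeded. A condensed sketch of that gathered-write idiom, assuming a platform that provides pwritev() (hypothetical flush_batch, not part of mdb.c):

    #include <errno.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Flush one accumulated run of n iovecs at file offset pos,
     * retrying the whole batch on EINTR as the loop above does. */
    static int flush_batch(int fd, struct iovec *iov, int n,
        off_t pos, ssize_t total)
    {
        ssize_t wres;
        do {
            wres = pwritev(fd, iov, n, pos);
        } while (wres < 0 && errno == EINTR);
        if (wres < 0)
            return errno;           /* real I/O error */
        return wres == total ? 0 : EIO; /* short write: disk full? */
    }
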
- */
- CACHEFLUSH(env->me_map, txn->mt_next_pgno * env->me_psize, DCACHE);
-
- for (i = keep; ++i <= pagecount; ) {
- dp = dl[i].mptr;
- /* This is a page we skipped above */
- if (!dl[i].mid) {
- dl[++j] = dl[i];
- dl[j].mid = dp->mp_pgno;
- continue;
- }
- mdb_dpage_free(env, dp);
- }
-
-done:
- i--;
- txn->mt_dirty_room += i - j;
- dl[0].mid = j;
- return MDB_SUCCESS;
-}
-
-int
-mdb_txn_commit(MDB_txn *txn)
-{
- int rc;
- unsigned int i, end_mode;
- MDB_env *env;
-
- if (txn == NULL)
- return EINVAL;
-
- /* mdb_txn_end() mode for a commit which writes nothing */
- end_mode = MDB_END_EMPTY_COMMIT|MDB_END_UPDATE|MDB_END_SLOT|MDB_END_FREE;
-
- if (txn->mt_child) {
- rc = mdb_txn_commit(txn->mt_child);
- if (rc)
- goto fail;
- }
-
- env = txn->mt_env;
-
- if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
- goto done;
- }
-
- if (txn->mt_flags & (MDB_TXN_FINISHED|MDB_TXN_ERROR)) {
- DPUTS("txn has failed/finished, can't commit");
- if (txn->mt_parent)
- txn->mt_parent->mt_flags |= MDB_TXN_ERROR;
- rc = MDB_BAD_TXN;
- goto fail;
- }
-
- if (txn->mt_parent) {
- MDB_txn *parent = txn->mt_parent;
- MDB_page **lp;
- MDB_ID2L dst, src;
- MDB_IDL pspill;
- unsigned x, y, len, ps_len;
-
- /* Append our free list to parent's */
- rc = mdb_midl_append_list(&parent->mt_free_pgs, txn->mt_free_pgs);
- if (rc)
- goto fail;
- mdb_midl_free(txn->mt_free_pgs);
- /* Failures after this must either undo the changes
- * to the parent or set MDB_TXN_ERROR in the parent.
- */
-
- parent->mt_next_pgno = txn->mt_next_pgno;
- parent->mt_flags = txn->mt_flags;
-
- /* Merge our cursors into parent's and close them */
- mdb_cursors_close(txn, 1);
-
- /* Update parent's DB table. */
- memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDB_db));
- parent->mt_numdbs = txn->mt_numdbs;
- parent->mt_dbflags[FREE_DBI] = txn->mt_dbflags[FREE_DBI];
- parent->mt_dbflags[MAIN_DBI] = txn->mt_dbflags[MAIN_DBI];
- for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
- /* preserve parent's DB_NEW status */
- x = parent->mt_dbflags[i] & DB_NEW;
- parent->mt_dbflags[i] = txn->mt_dbflags[i] | x;
- }
-
- dst = parent->mt_u.dirty_list;
- src = txn->mt_u.dirty_list;
- /* Remove anything in our dirty list from parent's spill list */
- if ((pspill = parent->mt_spill_pgs) && (ps_len = pspill[0])) {
- x = y = ps_len;
- pspill[0] = (pgno_t)-1;
- /* Mark our dirty pages as deleted in parent spill list */
- for (i=0, len=src[0].mid; ++i <= len; ) {
- MDB_ID pn = src[i].mid << 1;
- while (pn > pspill[x])
- x--;
- if (pn == pspill[x]) {
- pspill[x] = 1;
- y = --x;
- }
- }
- /* Squash deleted pagenums if we deleted any */
- for (x=y; ++x <= ps_len; )
- if (!(pspill[x] & 1))
- pspill[++y] = pspill[x];
- pspill[0] = y;
- }
-
- /* Remove anything in our spill list from parent's dirty list */
- if (txn->mt_spill_pgs && txn->mt_spill_pgs[0]) {
- for (i=1; i<=txn->mt_spill_pgs[0]; i++) {
- MDB_ID pn = txn->mt_spill_pgs[i];
- if (pn & 1)
- continue; /* deleted spillpg */
- pn >>= 1;
- y = mdb_mid2l_search(dst, pn);
- if (y <= dst[0].mid && dst[y].mid == pn) {
- free(dst[y].mptr);
- while (y < dst[0].mid) {
- dst[y] = dst[y+1];
- y++;
- }
- dst[0].mid--;
- }
- }
- }
-
- /* Find len = length of merging our dirty list with parent's */
- x = dst[0].mid;
- dst[0].mid = 0; /* simplify loops */
- if (parent->mt_parent) {
- len = x + src[0].mid;
- y = mdb_mid2l_search(src, dst[x].mid + 1) - 1;
- for (i = x; y && i; y--) {
- pgno_t yp = src[y].mid;
- while (yp < dst[i].mid)
- i--;
- if (yp == dst[i].mid) {
- i--;
- len--;
- }
- }
- } else { /* Simplify the above for single-ancestor case */
- len = MDB_IDL_UM_MAX - txn->mt_dirty_room;
- }
- /* Merge our dirty list with parent's */
- y = src[0].mid;
- for (i = len; y; dst[i--] = src[y--]) {
- pgno_t yp = src[y].mid;
- while (yp < dst[x].mid)
- dst[i--] = dst[x--];
- if (yp == dst[x].mid)
- free(dst[x--].mptr);
- }
- mdb_tassert(txn, i == x);
- dst[0].mid = len;
- free(txn->mt_u.dirty_list);
- parent->mt_dirty_room = txn->mt_dirty_room;
- if (txn->mt_spill_pgs) {
- if (parent->mt_spill_pgs) {
- /* TODO: Prevent failure here, so parent does not fail */
- rc = mdb_midl_append_list(&parent->mt_spill_pgs, txn->mt_spill_pgs);
- if (rc)
- parent->mt_flags |= MDB_TXN_ERROR;
- mdb_midl_free(txn->mt_spill_pgs);
- mdb_midl_sort(parent->mt_spill_pgs);
- } else {
- parent->mt_spill_pgs = txn->mt_spill_pgs;
- }
- }
-
- /* Append our loose page list to parent's */
- for (lp = &parent->mt_loose_pgs; *lp; lp = &NEXT_LOOSE_PAGE(*lp))
- ;
- *lp = txn->mt_loose_pgs;
- parent->mt_loose_count += txn->mt_loose_count;
-
- parent->mt_child = NULL;
- mdb_midl_free(((MDB_ntxn *)txn)->mnt_pgstate.mf_pghead);
- free(txn);
- return rc;
- }
-
- if (txn != env->me_txn) {
- DPUTS("attempt to commit unknown transaction");
- rc = EINVAL;
- goto fail;
- }
-
- mdb_cursors_close(txn, 0);
-
- if (!txn->mt_u.dirty_list[0].mid &&
- !(txn->mt_flags & (MDB_TXN_DIRTY|MDB_TXN_SPILLS)))
- goto done;
-
- DPRINTF(("committing txn %"Yu" %p on mdbenv %p, root page %"Yu,
- txn->mt_txnid, (void*)txn, (void*)env, txn->mt_dbs[MAIN_DBI].md_root));
-
- /* Update DB root pointers */
- if (txn->mt_numdbs > CORE_DBS) {
- MDB_cursor mc;
- MDB_dbi i;
- MDB_val data;
- data.mv_size = sizeof(MDB_db);
-
- mdb_cursor_init(&mc, txn, MAIN_DBI, NULL);
- for (i = CORE_DBS; i < txn->mt_numdbs; i++) {
- if (txn->mt_dbflags[i] & DB_DIRTY) {
- if (TXN_DBI_CHANGED(txn, i)) {
- rc = MDB_BAD_DBI;
- goto fail;
- }
- data.mv_data = &txn->mt_dbs[i];
- rc = mdb_cursor_put(&mc, &txn->mt_dbxs[i].md_name, &data,
- F_SUBDATA);
- if (rc)
- goto fail;
- }
- }
- }
-
- rc = mdb_freelist_save(txn);
- if (rc)
- goto fail;
-
- mdb_midl_free(env->me_pghead);
- env->me_pghead = NULL;
- mdb_midl_shrink(&txn->mt_free_pgs);
-
-#if (MDB_DEBUG) > 2
- mdb_audit(txn);
-#endif
-
- if ((rc = mdb_page_flush(txn, 0)))
- goto fail;
- if (!F_ISSET(txn->mt_flags, MDB_TXN_NOSYNC) &&
- (rc = mdb_env_sync0(env, 0, txn->mt_next_pgno)))
- goto fail;
- if ((rc = mdb_env_write_meta(txn)))
- goto fail;
- end_mode = MDB_END_COMMITTED|MDB_END_UPDATE;
-
-done:
- mdb_txn_end(txn, end_mode);
- return MDB_SUCCESS;
-
-fail:
- mdb_txn_abort(txn);
- return rc;
-}
-
-/** Read the environment parameters of a DB environment before
- * mapping it into memory.
- * @param[in] env the environment handle
- * @param[in] prev whether to read the backup meta page
- * @param[out] meta address of where to store the meta information
- * @return 0 on success, non-zero on failure.
- */
-static int ESECT
-mdb_env_read_header(MDB_env *env, int prev, MDB_meta *meta)
-{
- MDB_metabuf pbuf;
- MDB_page *p;
- MDB_meta *m;
- int i, rc, off;
- enum { Size = sizeof(pbuf) };
-
- /* We don't know the page size yet, so use a minimum value.
- * Read both meta pages so we can use the latest one.
- */
-
- for (i=off=0; i<NUM_METAS; i++, off += meta->mm_psize) {
-#ifdef _WIN32
- DWORD len;
- OVERLAPPED ov;
- memset(&ov, 0, sizeof(ov));
- ov.Offset = off;
- rc = ReadFile(env->me_fd, &pbuf, Size, &len, &ov) ?
(int)len : -1; - if (rc == -1 && ErrCode() == ERROR_HANDLE_EOF) - rc = 0; -#else - rc = pread(env->me_fd, &pbuf, Size, off); -#endif - if (rc != Size) { - if (rc == 0 && off == 0) - return ENOENT; - rc = rc < 0 ? (int) ErrCode() : MDB_INVALID; - DPRINTF(("read: %s", mdb_strerror(rc))); - return rc; - } - - p = (MDB_page *)&pbuf; - - if (!F_ISSET(p->mp_flags, P_META)) { - DPRINTF(("page %"Yu" not a meta page", p->mp_pgno)); - return MDB_INVALID; - } - - m = METADATA(p); - if (m->mm_magic != MDB_MAGIC) { - DPUTS("meta has invalid magic"); - return MDB_INVALID; - } - - if (m->mm_version != MDB_DATA_VERSION) { - DPRINTF(("database is version %u, expected version %u", - m->mm_version, MDB_DATA_VERSION)); - return MDB_VERSION_MISMATCH; - } - - if (off == 0 || (prev ? m->mm_txnid < meta->mm_txnid : m->mm_txnid > meta->mm_txnid)) - *meta = *m; - } - return 0; -} - -/** Fill in most of the zeroed #MDB_meta for an empty database environment */ -static void ESECT -mdb_env_init_meta0(MDB_env *env, MDB_meta *meta) -{ - meta->mm_magic = MDB_MAGIC; - meta->mm_version = MDB_DATA_VERSION; - meta->mm_mapsize = env->me_mapsize; - meta->mm_psize = env->me_psize; - meta->mm_last_pg = NUM_METAS-1; - meta->mm_flags = env->me_flags & 0xffff; - meta->mm_flags |= MDB_INTEGERKEY; /* this is mm_dbs[FREE_DBI].md_flags */ - meta->mm_dbs[FREE_DBI].md_root = P_INVALID; - meta->mm_dbs[MAIN_DBI].md_root = P_INVALID; -} - -/** Write the environment parameters of a freshly created DB environment. - * @param[in] env the environment handle - * @param[in] meta the #MDB_meta to write - * @return 0 on success, non-zero on failure. - */ -static int ESECT -mdb_env_init_meta(MDB_env *env, MDB_meta *meta) -{ - MDB_page *p, *q; - int rc; - unsigned int psize; -#ifdef _WIN32 - DWORD len; - OVERLAPPED ov; - memset(&ov, 0, sizeof(ov)); -#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \ - ov.Offset = pos; \ - rc = WriteFile(fd, ptr, size, &len, &ov); } while(0) -#else - int len; -#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \ - len = pwrite(fd, ptr, size, pos); \ - if (len == -1 && ErrCode() == EINTR) continue; \ - rc = (len >= 0); break; } while(1) -#endif - - DPUTS("writing new meta page"); - - psize = env->me_psize; - - p = calloc(NUM_METAS, psize); - if (!p) - return ENOMEM; - p->mp_pgno = 0; - p->mp_flags = P_META; - *(MDB_meta *)METADATA(p) = *meta; - - q = (MDB_page *)((char *)p + psize); - q->mp_pgno = 1; - q->mp_flags = P_META; - *(MDB_meta *)METADATA(q) = *meta; - - DO_PWRITE(rc, env->me_fd, p, psize * NUM_METAS, len, 0); - if (!rc) - rc = ErrCode(); - else if ((unsigned) len == psize * NUM_METAS) - rc = MDB_SUCCESS; - else - rc = ENOSPC; - free(p); - return rc; -} - -/** Update the environment info to commit a transaction. - * @param[in] txn the transaction that's being committed - * @return 0 on success, non-zero on failure. 
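The DO_PWRITE macro above hides an EINTR retry around pwrite(); the same idiom as a plain function, POSIX side only (hypothetical pwrite_all; like mdb_env_init_meta() it maps a short write to ENOSPC):

    #include <errno.h>
    #include <unistd.h>

    /* Hypothetical helper, not from mdb.c: write len bytes at off,
     * retrying on EINTR, mirroring the POSIX DO_PWRITE expansion. */
    static int pwrite_all(int fd, const void *buf, size_t len, off_t off)
    {
        ssize_t n;
        do {
            n = pwrite(fd, buf, len, off);
        } while (n < 0 && errno == EINTR);
        if (n < 0)
            return errno;
        return (size_t)n == len ? 0 : ENOSPC;  /* short write */
    }
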
- */ -static int -mdb_env_write_meta(MDB_txn *txn) -{ - MDB_env *env; - MDB_meta meta, metab, *mp; - unsigned flags; - mdb_size_t mapsize; - off_t off; - int rc, len, toggle; - char *ptr; - HANDLE mfd; -#ifdef _WIN32 - OVERLAPPED ov; -#else - int r2; -#endif - - toggle = txn->mt_txnid & 1; - DPRINTF(("writing meta page %d for root page %"Yu, - toggle, txn->mt_dbs[MAIN_DBI].md_root)); - - env = txn->mt_env; - flags = txn->mt_flags | env->me_flags; - mp = env->me_metas[toggle]; - mapsize = env->me_metas[toggle ^ 1]->mm_mapsize; - /* Persist any increases of mapsize config */ - if (mapsize < env->me_mapsize) - mapsize = env->me_mapsize; - - if (flags & MDB_WRITEMAP) { - mp->mm_mapsize = mapsize; - mp->mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI]; - mp->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; - mp->mm_last_pg = txn->mt_next_pgno - 1; -#if (__GNUC__ * 100 + __GNUC_MINOR__ >= 404) && /* TODO: portability */ \ - !(defined(__i386__) || defined(__x86_64__)) - /* LY: issue a memory barrier, if not x86. ITS#7969 */ - __sync_synchronize(); -#endif - mp->mm_txnid = txn->mt_txnid; - if (!(flags & (MDB_NOMETASYNC|MDB_NOSYNC))) { - unsigned meta_size = env->me_psize; - rc = (env->me_flags & MDB_MAPASYNC) ? MS_ASYNC : MS_SYNC; - ptr = (char *)mp - PAGEHDRSZ; -#ifndef _WIN32 /* POSIX msync() requires ptr = start of OS page */ - r2 = (ptr - env->me_map) & (env->me_os_psize - 1); - ptr -= r2; - meta_size += r2; -#endif - if (MDB_MSYNC(ptr, meta_size, rc)) { - rc = ErrCode(); - goto fail; - } - } - goto done; - } - metab.mm_txnid = mp->mm_txnid; - metab.mm_last_pg = mp->mm_last_pg; - - meta.mm_mapsize = mapsize; - meta.mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI]; - meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; - meta.mm_last_pg = txn->mt_next_pgno - 1; - meta.mm_txnid = txn->mt_txnid; - - off = offsetof(MDB_meta, mm_mapsize); - ptr = (char *)&meta + off; - len = sizeof(MDB_meta) - off; - off += (char *)mp - env->me_map; - - /* Write to the SYNC fd unless MDB_NOSYNC/MDB_NOMETASYNC. - * (me_mfd goes to the same file as me_fd, but writing to it - * also syncs to disk. Avoids a separate fdatasync() call.) - */ - mfd = (flags & (MDB_NOSYNC|MDB_NOMETASYNC)) ? env->me_fd : env->me_mfd; -#ifdef _WIN32 - { - memset(&ov, 0, sizeof(ov)); - ov.Offset = off; - if (!WriteFile(mfd, ptr, len, (DWORD *)&rc, &ov)) - rc = -1; - } -#else -retry_write: - rc = pwrite(mfd, ptr, len, off); -#endif - if (rc != len) { - rc = rc < 0 ? ErrCode() : EIO; -#ifndef _WIN32 - if (rc == EINTR) - goto retry_write; -#endif - DPUTS("write failed, disk error?"); - /* On a failure, the pagecache still contains the new data. - * Write some old data back, to prevent it from being used. - * Use the non-SYNC fd; we know it will fail anyway. - */ - meta.mm_last_pg = metab.mm_last_pg; - meta.mm_txnid = metab.mm_txnid; -#ifdef _WIN32 - memset(&ov, 0, sizeof(ov)); - ov.Offset = off; - WriteFile(env->me_fd, ptr, len, NULL, &ov); -#else - r2 = pwrite(env->me_fd, ptr, len, off); - (void)r2; /* Silence warnings. We don't care about pwrite's return value */ -#endif -fail: - env->me_flags |= MDB_FATAL_ERROR; - return rc; - } - /* MIPS has cache coherency issues, this is a no-op everywhere else */ - CACHEFLUSH(env->me_map + off, len, DCACHE); -done: - /* Memory ordering issues are irrelevant; since the entire writer - * is wrapped by wmutex, all of these changes will become visible - * after the wmutex is unlocked. Since the DB is multi-version, - * readers will get consistent data regardless of how fresh or - * how stale their view of these values is. 
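mdb_env_write_meta() alternates between the two meta pages on the parity of the committing transaction id, and mdb_env_pick_meta() later selects the slot with the larger txnid. A toy model of that double buffering, with a hypothetical meta_t that stands in for the real MDB_meta (the real code additionally orders these stores against the data sync, which this sketch omits):

    #include <stdint.h>

    typedef struct { uint64_t txnid; uint64_t root; } meta_t; /* stand-in */

    static meta_t slots[2];

    static void write_meta(uint64_t txnid, uint64_t root)
    {
        meta_t *m = &slots[txnid & 1];  /* overwrite the older slot */
        m->root = root;
        m->txnid = txnid;               /* txnid last: marks slot valid */
    }

    static const meta_t *pick_meta(void)
    {
        /* same selection test as mdb_env_pick_meta() */
        return &slots[ slots[0].txnid < slots[1].txnid ];
    }
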
- */ - if (env->me_txns) - env->me_txns->mti_txnid = txn->mt_txnid; - - return MDB_SUCCESS; -} - -/** Check both meta pages to see which one is newer. - * @param[in] env the environment handle - * @return newest #MDB_meta. - */ -static MDB_meta * -mdb_env_pick_meta(const MDB_env *env) -{ - MDB_meta *const *metas = env->me_metas; - return metas[ metas[0]->mm_txnid < metas[1]->mm_txnid ]; -} - -int ESECT -mdb_env_create(MDB_env **env) -{ - MDB_env *e; - - e = calloc(1, sizeof(MDB_env)); - if (!e) - return ENOMEM; - - e->me_maxreaders = DEFAULT_READERS; - e->me_maxdbs = e->me_numdbs = CORE_DBS; - e->me_fd = INVALID_HANDLE_VALUE; - e->me_lfd = INVALID_HANDLE_VALUE; - e->me_mfd = INVALID_HANDLE_VALUE; -#ifdef MDB_USE_POSIX_SEM - e->me_rmutex = SEM_FAILED; - e->me_wmutex = SEM_FAILED; -#elif defined MDB_USE_SYSV_SEM - e->me_rmutex->semid = -1; - e->me_wmutex->semid = -1; -#endif - e->me_pid = getpid(); - GET_PAGESIZE(e->me_os_psize); - VGMEMP_CREATE(e,0,0); - *env = e; - return MDB_SUCCESS; -} - -#ifdef _WIN32 -/** @brief Map a result from an NTAPI call to WIN32. */ -static DWORD -mdb_nt2win32(NTSTATUS st) -{ - OVERLAPPED o = {0}; - DWORD br; - o.Internal = st; - GetOverlappedResult(NULL, &o, &br, FALSE); - return GetLastError(); -} -#endif - -static int ESECT -mdb_env_map(MDB_env *env, void *addr) -{ - MDB_page *p; - unsigned int flags = env->me_flags; -#ifdef _WIN32 - int rc; - int access = SECTION_MAP_READ; - HANDLE mh; - void *map; - SIZE_T msize; - ULONG pageprot = PAGE_READONLY, secprot, alloctype; - - if (flags & MDB_WRITEMAP) { - access |= SECTION_MAP_WRITE; - pageprot = PAGE_READWRITE; - } - if (flags & MDB_RDONLY) { - secprot = PAGE_READONLY; - msize = 0; - alloctype = 0; - } else { - secprot = PAGE_READWRITE; - msize = env->me_mapsize; - alloctype = MEM_RESERVE; - } - - rc = NtCreateSection(&mh, access, NULL, NULL, secprot, SEC_RESERVE, env->me_fd); - if (rc) - return mdb_nt2win32(rc); - map = addr; -#ifdef MDB_VL32 - msize = NUM_METAS * env->me_psize; -#endif - rc = NtMapViewOfSection(mh, GetCurrentProcess(), &map, 0, 0, NULL, &msize, ViewUnmap, alloctype, pageprot); -#ifdef MDB_VL32 - env->me_fmh = mh; -#else - NtClose(mh); -#endif - if (rc) - return mdb_nt2win32(rc); - env->me_map = map; -#else -#ifdef MDB_VL32 - (void) flags; - env->me_map = mmap(addr, NUM_METAS * env->me_psize, PROT_READ, MAP_SHARED, - env->me_fd, 0); - if (env->me_map == MAP_FAILED) { - env->me_map = NULL; - return ErrCode(); - } -#else - int prot = PROT_READ; - if (flags & MDB_WRITEMAP) { - prot |= PROT_WRITE; - if (ftruncate(env->me_fd, env->me_mapsize) < 0) - return ErrCode(); - } - env->me_map = mmap(addr, env->me_mapsize, prot, MAP_SHARED, - env->me_fd, 0); - if (env->me_map == MAP_FAILED) { - env->me_map = NULL; - return ErrCode(); - } - - if (flags & MDB_NORDAHEAD) { - /* Turn off readahead. It's harmful when the DB is larger than RAM. */ -#ifdef MADV_RANDOM - madvise(env->me_map, env->me_mapsize, MADV_RANDOM); -#else -#ifdef POSIX_MADV_RANDOM - posix_madvise(env->me_map, env->me_mapsize, POSIX_MADV_RANDOM); -#endif /* POSIX_MADV_RANDOM */ -#endif /* MADV_RANDOM */ - } -#endif /* _WIN32 */ - - /* Can happen because the address argument to mmap() is just a - * hint. mmap() can pick another, e.g. if the range is in use. - * The MAP_FIXED flag would prevent that, but then mmap could - * instead unmap existing pages to make room for the new map. - */ - if (addr && env->me_map != addr) - return EBUSY; /* TODO: Make a new MDB_* error code? 
*/ -#endif - - p = (MDB_page *)env->me_map; - env->me_metas[0] = METADATA(p); - env->me_metas[1] = (MDB_meta *)((char *)env->me_metas[0] + env->me_psize); - - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_mapsize(MDB_env *env, mdb_size_t size) -{ - /* If env is already open, caller is responsible for making - * sure there are no active txns. - */ - if (env->me_map) { - MDB_meta *meta; -#ifndef MDB_VL32 - void *old; - int rc; -#endif - if (env->me_txn) - return EINVAL; - meta = mdb_env_pick_meta(env); - if (!size) - size = meta->mm_mapsize; - { - /* Silently round up to minimum if the size is too small */ - mdb_size_t minsize = (meta->mm_last_pg + 1) * env->me_psize; - if (size < minsize) - size = minsize; - } -#ifndef MDB_VL32 - /* For MDB_VL32 this bit is a noop since we dynamically remap - * chunks of the DB anyway. - */ - munmap(env->me_map, env->me_mapsize); - env->me_mapsize = size; - old = (env->me_flags & MDB_FIXEDMAP) ? env->me_map : NULL; - rc = mdb_env_map(env, old); - if (rc) - return rc; -#endif /* !MDB_VL32 */ - } - env->me_mapsize = size; - if (env->me_psize) - env->me_maxpg = env->me_mapsize / env->me_psize; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs) -{ - if (env->me_map) - return EINVAL; - env->me_maxdbs = dbs + CORE_DBS; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_maxreaders(MDB_env *env, unsigned int readers) -{ - if (env->me_map || readers < 1) - return EINVAL; - env->me_maxreaders = readers; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers) -{ - if (!env || !readers) - return EINVAL; - *readers = env->me_maxreaders; - return MDB_SUCCESS; -} - -static int ESECT -mdb_fsize(HANDLE fd, mdb_size_t *size) -{ -#ifdef _WIN32 - LARGE_INTEGER fsize; - - if (!GetFileSizeEx(fd, &fsize)) - return ErrCode(); - - *size = fsize.QuadPart; -#else - struct stat st; - - if (fstat(fd, &st)) - return ErrCode(); - - *size = st.st_size; -#endif - return MDB_SUCCESS; -} - - -#ifdef _WIN32 -typedef wchar_t mdb_nchar_t; -# define MDB_NAME(str) L##str -# define mdb_name_cpy wcscpy -#else -/** Character type for file names: char on Unix, wchar_t on Windows */ -typedef char mdb_nchar_t; -# define MDB_NAME(str) str /**< #mdb_nchar_t[] string literal */ -# define mdb_name_cpy strcpy /**< Copy name (#mdb_nchar_t string) */ -#endif - -/** Filename - string of #mdb_nchar_t[] */ -typedef struct MDB_name { - int mn_len; /**< Length */ - int mn_alloced; /**< True if #mn_val was malloced */ - mdb_nchar_t *mn_val; /**< Contents */ -} MDB_name; - -/** Filename suffixes [datafile,lockfile][without,with MDB_NOSUBDIR] */ -static const mdb_nchar_t *const mdb_suffixes[2][2] = { - { MDB_NAME("/data.mdb"), MDB_NAME("") }, - { MDB_NAME("/lock.mdb"), MDB_NAME("-lock") } -}; - -#define MDB_SUFFLEN 9 /**< Max string length in #mdb_suffixes[] */ - -/** Set up filename + scratch area for filename suffix, for opening files. - * It should be freed with #mdb_fname_destroy(). - * On Windows, paths are converted from char *UTF-8 to wchar_t *UTF-16. - * - * @param[in] path Pathname for #mdb_env_open(). - * @param[in] envflags Whether a subdir and/or lockfile will be used. - * @param[out] fname Resulting filename, with room for a suffix if necessary. - */ -static int ESECT -mdb_fname_init(const char *path, unsigned envflags, MDB_name *fname) -{ - int no_suffix = F_ISSET(envflags, MDB_NOSUBDIR|MDB_NOLOCK); - fname->mn_alloced = 0; -#ifdef _WIN32 - return utf8_to_utf16(path, fname, no_suffix ? 
0 : MDB_SUFFLEN); -#else - fname->mn_len = strlen(path); - if (no_suffix) - fname->mn_val = (char *) path; - else if ((fname->mn_val = malloc(fname->mn_len + MDB_SUFFLEN+1)) != NULL) { - fname->mn_alloced = 1; - strcpy(fname->mn_val, path); - } - else - return ENOMEM; - return MDB_SUCCESS; -#endif -} - -/** Destroy \b fname from #mdb_fname_init() */ -#define mdb_fname_destroy(fname) \ - do { if ((fname).mn_alloced) free((fname).mn_val); } while (0) - -#ifdef O_CLOEXEC /* POSIX.1-2008: Set FD_CLOEXEC atomically at open() */ -# define MDB_CLOEXEC O_CLOEXEC -#else -# define MDB_CLOEXEC 0 -#endif - -/** File type, access mode etc. for #mdb_fopen() */ -enum mdb_fopen_type { -#ifdef _WIN32 - MDB_O_RDONLY, MDB_O_RDWR, MDB_O_META, MDB_O_COPY, MDB_O_LOCKS -#else - /* A comment in mdb_fopen() explains some O_* flag choices. */ - MDB_O_RDONLY= O_RDONLY, /**< for RDONLY me_fd */ - MDB_O_RDWR = O_RDWR |O_CREAT, /**< for me_fd */ - MDB_O_META = O_WRONLY|MDB_DSYNC |MDB_CLOEXEC, /**< for me_mfd */ - MDB_O_COPY = O_WRONLY|O_CREAT|O_EXCL|MDB_CLOEXEC, /**< for #mdb_env_copy() */ - /** Bitmask for open() flags in enum #mdb_fopen_type. The other bits - * distinguish otherwise-equal MDB_O_* constants from each other. - */ - MDB_O_MASK = MDB_O_RDWR|MDB_CLOEXEC | MDB_O_RDONLY|MDB_O_META|MDB_O_COPY, - MDB_O_LOCKS = MDB_O_RDWR|MDB_CLOEXEC | ((MDB_O_MASK+1) & ~MDB_O_MASK) /**< for me_lfd */ -#endif -}; - -/** Open an LMDB file. - * @param[in] env The LMDB environment. - * @param[in,out] fname Path from from #mdb_fname_init(). A suffix is - * appended if necessary to create the filename, without changing mn_len. - * @param[in] which Determines file type, access mode, etc. - * @param[in] mode The Unix permissions for the file, if we create it. - * @param[out] res Resulting file handle. - * @return 0 on success, non-zero on failure. - */ -static int ESECT -mdb_fopen(const MDB_env *env, MDB_name *fname, - enum mdb_fopen_type which, mdb_mode_t mode, - HANDLE *res) -{ - int rc = MDB_SUCCESS; - HANDLE fd; -#ifdef _WIN32 - DWORD acc, share, disp, attrs; -#else - int flags; -#endif - - if (fname->mn_alloced) /* modifiable copy */ - mdb_name_cpy(fname->mn_val + fname->mn_len, - mdb_suffixes[which==MDB_O_LOCKS][F_ISSET(env->me_flags, MDB_NOSUBDIR)]); - - /* The directory must already exist. Usually the file need not. - * MDB_O_META requires the file because we already created it using - * MDB_O_RDWR. MDB_O_COPY must not overwrite an existing file. - * - * With MDB_O_COPY we do not want the OS to cache the writes, since - * the source data is already in the OS cache. - * - * The lockfile needs FD_CLOEXEC (close file descriptor on exec*()) - * to avoid the flock() issues noted under Caveats in lmdb.h. - * Also set it for other filehandles which the user cannot get at - * and close himself, which he may need after fork(). I.e. all but - * me_fd, which programs do use via mdb_env_get_fd(). 
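When O_CLOEXEC cannot be passed to open(), the fallback used in the function body below is to set FD_CLOEXEC afterwards with fcntl(); in isolation (hypothetical set_cloexec; the window between open() and fcntl() is why O_CLOEXEC is preferred when available):

    #include <fcntl.h>

    /* Hypothetical helper, not from mdb.c. */
    static void set_cloexec(int fd)
    {
        int flags = fcntl(fd, F_GETFD);
        if (flags != -1)
            (void) fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
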
- */
-
-#ifdef _WIN32
- acc = GENERIC_READ|GENERIC_WRITE;
- share = FILE_SHARE_READ|FILE_SHARE_WRITE;
- disp = OPEN_ALWAYS;
- attrs = FILE_ATTRIBUTE_NORMAL;
- switch (which) {
- case MDB_O_RDONLY: /* read-only datafile */
- acc = GENERIC_READ;
- disp = OPEN_EXISTING;
- break;
- case MDB_O_META: /* for writing metapages */
- acc = GENERIC_WRITE;
- disp = OPEN_EXISTING;
- attrs = FILE_ATTRIBUTE_NORMAL|FILE_FLAG_WRITE_THROUGH;
- break;
- case MDB_O_COPY: /* mdb_env_copy() & co */
- acc = GENERIC_WRITE;
- share = 0;
- disp = CREATE_NEW;
- attrs = FILE_FLAG_NO_BUFFERING|FILE_FLAG_WRITE_THROUGH;
- break;
- default: break; /* silence gcc -Wswitch (not all enum values handled) */
- }
- fd = CreateFileW(fname->mn_val, acc, share, NULL, disp, attrs, NULL);
-#else
- fd = open(fname->mn_val, which & MDB_O_MASK, mode);
-#endif
-
- if (fd == INVALID_HANDLE_VALUE)
- rc = ErrCode();
-#ifndef _WIN32
- else {
- if (which != MDB_O_RDONLY && which != MDB_O_RDWR) {
- /* Set CLOEXEC if we could not pass it to open() */
- if (!MDB_CLOEXEC && (flags = fcntl(fd, F_GETFD)) != -1)
- (void) fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
- }
- if (which == MDB_O_COPY && env->me_psize >= env->me_os_psize) {
- /* This may require buffer alignment. There is no portable
- * way to ask how much, so we require OS pagesize alignment.
- */
-# ifdef F_NOCACHE /* __APPLE__ */
- (void) fcntl(fd, F_NOCACHE, 1);
-# elif defined O_DIRECT
- /* open(...O_DIRECT...) would break on filesystems without
- * O_DIRECT support (ITS#7682). Try to set it here instead.
- */
- if ((flags = fcntl(fd, F_GETFL)) != -1)
- (void) fcntl(fd, F_SETFL, flags | O_DIRECT);
-# endif
- }
- }
-#endif /* !_WIN32 */
-
- *res = fd;
- return rc;
-}
-
-
-#ifdef BROKEN_FDATASYNC
-#include <sys/utsname.h>
-#include <sys/vfs.h>
-#endif
-
-/** Further setup required for opening an LMDB environment
- */
-static int ESECT
-mdb_env_open2(MDB_env *env, int prev)
-{
- unsigned int flags = env->me_flags;
- int i, newenv = 0, rc;
- MDB_meta meta;
-
-#ifdef _WIN32
- /* See if we should use QueryLimited */
- rc = GetVersion();
- if ((rc & 0xff) > 5)
- env->me_pidquery = MDB_PROCESS_QUERY_LIMITED_INFORMATION;
- else
- env->me_pidquery = PROCESS_QUERY_INFORMATION;
- /* Grab functions we need from NTDLL */
- if (!NtCreateSection) {
- HMODULE h = GetModuleHandle("NTDLL.DLL");
- if (!h)
- return MDB_PROBLEM;
- NtClose = (NtCloseFunc *)GetProcAddress(h, "NtClose");
- if (!NtClose)
- return MDB_PROBLEM;
- NtMapViewOfSection = (NtMapViewOfSectionFunc *)GetProcAddress(h, "NtMapViewOfSection");
- if (!NtMapViewOfSection)
- return MDB_PROBLEM;
- NtCreateSection = (NtCreateSectionFunc *)GetProcAddress(h, "NtCreateSection");
- if (!NtCreateSection)
- return MDB_PROBLEM;
- }
-#endif /* _WIN32 */
-
-#ifdef BROKEN_FDATASYNC
- /* ext3/ext4 fdatasync is broken on some older Linux kernels.
- * https://lkml.org/lkml/2012/9/3/83
- * Kernels after 3.6-rc6 are known good.
- * https://lkml.org/lkml/2012/9/10/556
- * See if the DB is on ext3/ext4, then check for new enough kernel
- * Kernels 2.6.32.60, 2.6.34.15, 3.2.30, and 3.5.4 are also known
- * to be patched.
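Stripped of the special cases for backported fixes, the kernel test that follows reduces to a three-way version comparison; a rough sketch (hypothetical kernel_at_least; the original also verifies first that the filesystem is ext3/ext4):

    #include <stdio.h>
    #include <sys/utsname.h>

    /* Hypothetical helper, not from mdb.c: is the running kernel at
     * least maj.min.patch?  Parses uname()'s "maj.min.patch-..." form. */
    static int kernel_at_least(int maj, int min, int patch)
    {
        struct utsname uts;
        int a = 0, b = 0, c = 0;
        if (uname(&uts) != 0)
            return 0;
        sscanf(uts.release, "%d.%d.%d", &a, &b, &c);
        if (a != maj) return a > maj;
        if (b != min) return b > min;
        return c >= patch;
    }

Here kernel_at_least(3, 6, 0) plays the role of the "3.6 and newer is OK" branch.
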
- */ - { - struct statfs st; - fstatfs(env->me_fd, &st); - while (st.f_type == 0xEF53) { - struct utsname uts; - int i; - uname(&uts); - if (uts.release[0] < '3') { - if (!strncmp(uts.release, "2.6.32.", 7)) { - i = atoi(uts.release+7); - if (i >= 60) - break; /* 2.6.32.60 and newer is OK */ - } else if (!strncmp(uts.release, "2.6.34.", 7)) { - i = atoi(uts.release+7); - if (i >= 15) - break; /* 2.6.34.15 and newer is OK */ - } - } else if (uts.release[0] == '3') { - i = atoi(uts.release+2); - if (i > 5) - break; /* 3.6 and newer is OK */ - if (i == 5) { - i = atoi(uts.release+4); - if (i >= 4) - break; /* 3.5.4 and newer is OK */ - } else if (i == 2) { - i = atoi(uts.release+4); - if (i >= 30) - break; /* 3.2.30 and newer is OK */ - } - } else { /* 4.x and newer is OK */ - break; - } - env->me_flags |= MDB_FSYNCONLY; - break; - } - } -#endif - - if ((i = mdb_env_read_header(env, prev, &meta)) != 0) { - if (i != ENOENT) - return i; - DPUTS("new mdbenv"); - newenv = 1; - env->me_psize = env->me_os_psize; - if (env->me_psize > MAX_PAGESIZE) - env->me_psize = MAX_PAGESIZE; - memset(&meta, 0, sizeof(meta)); - mdb_env_init_meta0(env, &meta); - meta.mm_mapsize = DEFAULT_MAPSIZE; - } else { - env->me_psize = meta.mm_psize; - } - - /* Was a mapsize configured? */ - if (!env->me_mapsize) { - env->me_mapsize = meta.mm_mapsize; - } - { - /* Make sure mapsize >= committed data size. Even when using - * mm_mapsize, which could be broken in old files (ITS#7789). - */ - mdb_size_t minsize = (meta.mm_last_pg + 1) * meta.mm_psize; - if (env->me_mapsize < minsize) - env->me_mapsize = minsize; - } - meta.mm_mapsize = env->me_mapsize; - - if (newenv && !(flags & MDB_FIXEDMAP)) { - /* mdb_env_map() may grow the datafile. Write the metapages - * first, so the file will be valid if initialization fails. - * Except with FIXEDMAP, since we do not yet know mm_address. - * We could fill in mm_address later, but then a different - * program might end up doing that - one with a memory layout - * and map address which does not suit the main program. - */ - rc = mdb_env_init_meta(env, &meta); - if (rc) - return rc; - newenv = 0; - } -#ifdef _WIN32 - /* For FIXEDMAP, make sure the file is non-empty before we attempt to map it */ - if (newenv) { - char dummy = 0; - DWORD len; - rc = WriteFile(env->me_fd, &dummy, 1, &len, NULL); - if (!rc) { - rc = ErrCode(); - return rc; - } - } -#endif - - rc = mdb_env_map(env, (flags & MDB_FIXEDMAP) ? 
meta.mm_address : NULL);
- if (rc)
- return rc;
-
- if (newenv) {
- if (flags & MDB_FIXEDMAP)
- meta.mm_address = env->me_map;
- i = mdb_env_init_meta(env, &meta);
- if (i != MDB_SUCCESS) {
- return i;
- }
- }
-
- env->me_maxfree_1pg = (env->me_psize - PAGEHDRSZ) / sizeof(pgno_t) - 1;
- env->me_nodemax = (((env->me_psize - PAGEHDRSZ) / MDB_MINKEYS) & -2)
- - sizeof(indx_t);
-#if !(MDB_MAXKEYSIZE)
- env->me_maxkey = env->me_nodemax - (NODESIZE + sizeof(MDB_db));
-#endif
- env->me_maxpg = env->me_mapsize / env->me_psize;
-
-#if MDB_DEBUG
- {
- MDB_meta *meta = mdb_env_pick_meta(env);
- MDB_db *db = &meta->mm_dbs[MAIN_DBI];
-
- DPRINTF(("opened database version %u, pagesize %u",
- meta->mm_version, env->me_psize));
- DPRINTF(("using meta page %d", (int) (meta->mm_txnid & 1)));
- DPRINTF(("depth: %u", db->md_depth));
- DPRINTF(("entries: %"Yu, db->md_entries));
- DPRINTF(("branch pages: %"Yu, db->md_branch_pages));
- DPRINTF(("leaf pages: %"Yu, db->md_leaf_pages));
- DPRINTF(("overflow pages: %"Yu, db->md_overflow_pages));
- DPRINTF(("root: %"Yu, db->md_root));
- }
-#endif
-
- return MDB_SUCCESS;
-}
-
-
-/** Release a reader thread's slot in the reader lock table.
- * This function is called automatically when a thread exits.
- * @param[in] ptr This points to the slot in the reader lock table.
- */
-static void
-mdb_env_reader_dest(void *ptr)
-{
- MDB_reader *reader = ptr;
-
-#ifndef _WIN32
- if (reader->mr_pid == getpid()) /* catch pthread_exit() in child process */
-#endif
- /* We omit the mutex, so do this atomically (i.e. skip mr_txnid) */
- reader->mr_pid = 0;
-}
-
-#ifdef _WIN32
-/** Junk for arranging thread-specific callbacks on Windows. This is
- * necessarily platform and compiler-specific. Windows supports up
- * to 1088 keys. Let's assume nobody opens more than 64 environments
- * in a single process, for now. They can override this if needed.
- */
-#ifndef MAX_TLS_KEYS
-#define MAX_TLS_KEYS 64
-#endif
-static pthread_key_t mdb_tls_keys[MAX_TLS_KEYS];
-static int mdb_tls_nkeys;
-
-static void NTAPI mdb_tls_callback(PVOID module, DWORD reason, PVOID ptr)
-{
- int i;
- switch(reason) {
- case DLL_PROCESS_ATTACH: break;
- case DLL_THREAD_ATTACH: break;
- case DLL_THREAD_DETACH:
- for (i=0; i<mdb_tls_nkeys; i++) {
- MDB_reader *r = pthread_getspecific(mdb_tls_keys[i]);
- if (r) {
- mdb_env_reader_dest(r);
- }
- }
- break;
- case DLL_PROCESS_DETACH: break;
- }
-}
-#ifdef _MSC_VER
-#ifdef _WIN64
-#pragma const_seg(".CRT$XLB")
-extern const PIMAGE_TLS_CALLBACK mdb_tls_cbp;
-const PIMAGE_TLS_CALLBACK mdb_tls_cbp = mdb_tls_callback;
-#pragma const_seg()
-#else /* WIN32 */
-#pragma data_seg(".CRT$XLB")
-PIMAGE_TLS_CALLBACK mdb_tls_cbp = mdb_tls_callback;
-#pragma data_seg()
-#endif /* WIN 32/64 */
-#else /* !_MSC_VER */
-PIMAGE_TLS_CALLBACK mdb_tls_cbp __attribute__((section (".CRT$XLB"))) = mdb_tls_callback;
-#endif /* _MSC_VER */
-#endif /* _WIN32 */
-
-/** Downgrade the exclusive lock on the region back to shared */
-static int ESECT
-mdb_env_share_locks(MDB_env *env, int *excl)
-{
- int rc = 0;
- MDB_meta *meta = mdb_env_pick_meta(env);
-
- env->me_txns->mti_txnid = meta->mm_txnid;
-
-#ifdef _WIN32
- {
- OVERLAPPED ov;
- /* First acquire a shared lock. The Unlock will
- * then release the existing exclusive lock.
- */
- memset(&ov, 0, sizeof(ov));
- if (!LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) {
- rc = ErrCode();
- } else {
- UnlockFile(env->me_lfd, 0, 0, 1, 0);
- *excl = 0;
- }
- }
-#else
- {
- struct flock lock_info;
- /* The shared lock replaces the existing lock */
- memset((void *)&lock_info, 0, sizeof(lock_info));
- lock_info.l_type = F_RDLCK;
- lock_info.l_whence = SEEK_SET;
- lock_info.l_start = 0;
- lock_info.l_len = 1;
- while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) &&
- (rc = ErrCode()) == EINTR) ;
- *excl = rc ? -1 : 0; /* error may mean we lost the lock */
- }
-#endif
-
- return rc;
-}
-
-/** Try to get exclusive lock, otherwise shared.
- * Maintain *excl = -1: no/unknown lock, 0: shared, 1: exclusive.
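The POSIX branch of the downgrade above relies on fcntl() record locks converting in place: requesting F_RDLCK over a byte we already hold an F_WRLCK on replaces the lock without an unlocked window. The same operation in isolation (hypothetical downgrade_to_shared, not part of mdb.c):

    #include <errno.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Hypothetical helper: convert our write lock on byte 0 of the
     * lockfile into a read lock, retrying on EINTR. */
    static int downgrade_to_shared(int lfd)
    {
        struct flock lk;
        int rc;
        memset(&lk, 0, sizeof(lk));
        lk.l_type = F_RDLCK;
        lk.l_whence = SEEK_SET;
        lk.l_start = 0;
        lk.l_len = 1;
        do {
            rc = fcntl(lfd, F_SETLK, &lk);
        } while (rc == -1 && errno == EINTR);
        return rc == -1 ? errno : 0;
    }
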
- */ -static int ESECT -mdb_env_excl_lock(MDB_env *env, int *excl) -{ - int rc = 0; -#ifdef _WIN32 - if (LockFile(env->me_lfd, 0, 0, 1, 0)) { - *excl = 1; - } else { - OVERLAPPED ov; - memset(&ov, 0, sizeof(ov)); - if (LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) { - *excl = 0; - } else { - rc = ErrCode(); - } - } -#else - struct flock lock_info; - memset((void *)&lock_info, 0, sizeof(lock_info)); - lock_info.l_type = F_WRLCK; - lock_info.l_whence = SEEK_SET; - lock_info.l_start = 0; - lock_info.l_len = 1; - while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) && - (rc = ErrCode()) == EINTR) ; - if (!rc) { - *excl = 1; - } else -# ifndef MDB_USE_POSIX_MUTEX - if (*excl < 0) /* always true when MDB_USE_POSIX_MUTEX */ -# endif - { - lock_info.l_type = F_RDLCK; - while ((rc = fcntl(env->me_lfd, F_SETLKW, &lock_info)) && - (rc = ErrCode()) == EINTR) ; - if (rc == 0) - *excl = 0; - } -#endif - return rc; -} - -#ifdef MDB_USE_HASH -/* - * hash_64 - 64 bit Fowler/Noll/Vo-0 FNV-1a hash code - * - * @(#) $Revision: 5.1 $ - * @(#) $Id: hash_64a.c,v 5.1 2009/06/30 09:01:38 chongo Exp $ - * @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_64a.c,v $ - * - * http://www.isthe.com/chongo/tech/comp/fnv/index.html - * - *** - * - * Please do not copyright this code. This code is in the public domain. - * - * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO - * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF - * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR - * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - * PERFORMANCE OF THIS SOFTWARE. - * - * By: - * chongo /\oo/\ - * http://www.isthe.com/chongo/ - * - * Share and Enjoy! :-) - */ - -/** perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer - * @param[in] val value to hash - * @param[in] len length of value - * @return 64 bit hash - */ -static mdb_hash_t -mdb_hash(const void *val, size_t len) -{ - const unsigned char *s = (const unsigned char *) val, *end = s + len; - mdb_hash_t hval = 0xcbf29ce484222325ULL; - /* - * FNV-1a hash each octet of the buffer - */ - while (s < end) { - hval = (hval ^ *s++) * 0x100000001b3ULL; - } - /* return our new hash value */ - return hval; -} - -/** Hash the string and output the encoded hash. - * This uses modified RFC1924 Ascii85 encoding to accommodate systems with - * very short name limits. We don't care about the encoding being reversible, - * we just want to preserve as many bits of the input as possible in a - * small printable string. - * @param[in] str string to hash - * @param[out] encbuf an array of 11 chars to hold the hash - */ -static const char mdb_a85[]= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"; - -static void ESECT -mdb_pack85(unsigned long long l, char *out) -{ - int i; - - for (i=0; i<10 && l; i++) { - *out++ = mdb_a85[l % 85]; - l /= 85; - } - *out = '\0'; -} - -/** Init #MDB_env.me_mutexname[] except the char which #MUTEXNAME() will set. - * Changes to this code must be reflected in #MDB_LOCK_FORMAT. 
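The hash above is the standard 64-bit FNV-1a fold: start from the offset basis 0xcbf29ce484222325, then XOR each octet in and multiply by the prime 0x100000001b3. A stand-alone copy can be checked against the published FNV test vectors:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fnv1a(const void *val, size_t len)
    {
        const unsigned char *s = val, *end = s + len;
        uint64_t h = 0xcbf29ce484222325ULL;     /* offset basis */
        while (s < end)
            h = (h ^ *s++) * 0x100000001b3ULL;  /* FNV prime */
        return h;
    }

    int main(void)
    {
        /* Published FNV-1a 64-bit test vector for "a": af63dc4c8601ec8c */
        printf("%llx\n", (unsigned long long)fnv1a("a", 1));
        return 0;
    }
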
- */ -static void ESECT -mdb_env_mname_init(MDB_env *env) -{ - char *nm = env->me_mutexname; - strcpy(nm, MUTEXNAME_PREFIX); - mdb_pack85(env->me_txns->mti_mutexid, nm + sizeof(MUTEXNAME_PREFIX)); -} - -/** Return env->me_mutexname after filling in ch ('r'/'w') for convenience */ -#define MUTEXNAME(env, ch) ( \ - (void) ((env)->me_mutexname[sizeof(MUTEXNAME_PREFIX)-1] = (ch)), \ - (env)->me_mutexname) - -#endif - -/** Open and/or initialize the lock region for the environment. - * @param[in] env The LMDB environment. - * @param[in] fname Filename + scratch area, from #mdb_fname_init(). - * @param[in] mode The Unix permissions for the file, if we create it. - * @param[in,out] excl In -1, out lock type: -1 none, 0 shared, 1 exclusive - * @return 0 on success, non-zero on failure. - */ -static int ESECT -mdb_env_setup_locks(MDB_env *env, MDB_name *fname, int mode, int *excl) -{ -#ifdef _WIN32 -# define MDB_ERRCODE_ROFS ERROR_WRITE_PROTECT -#else -# define MDB_ERRCODE_ROFS EROFS -#endif -#ifdef MDB_USE_SYSV_SEM - int semid; - union semun semu; -#endif - int rc; - off_t size, rsize; - - rc = mdb_fopen(env, fname, MDB_O_LOCKS, mode, &env->me_lfd); - if (rc) { - /* Omit lockfile if read-only env on read-only filesystem */ - if (rc == MDB_ERRCODE_ROFS && (env->me_flags & MDB_RDONLY)) { - return MDB_SUCCESS; - } - goto fail; - } - - if (!(env->me_flags & MDB_NOTLS)) { - rc = pthread_key_create(&env->me_txkey, mdb_env_reader_dest); - if (rc) - goto fail; - env->me_flags |= MDB_ENV_TXKEY; -#ifdef _WIN32 - /* Windows TLS callbacks need help finding their TLS info. */ - if (mdb_tls_nkeys >= MAX_TLS_KEYS) { - rc = MDB_TLS_FULL; - goto fail; - } - mdb_tls_keys[mdb_tls_nkeys++] = env->me_txkey; -#endif - } - - /* Try to get exclusive lock. If we succeed, then - * nobody is using the lock region and we should initialize it. 
- */ - if ((rc = mdb_env_excl_lock(env, excl))) goto fail; - -#ifdef _WIN32 - size = GetFileSize(env->me_lfd, NULL); -#else - size = lseek(env->me_lfd, 0, SEEK_END); - if (size == -1) goto fail_errno; -#endif - rsize = (env->me_maxreaders-1) * sizeof(MDB_reader) + sizeof(MDB_txninfo); - if (size < rsize && *excl > 0) { -#ifdef _WIN32 - if (SetFilePointer(env->me_lfd, rsize, NULL, FILE_BEGIN) != (DWORD)rsize - || !SetEndOfFile(env->me_lfd)) - goto fail_errno; -#else - if (ftruncate(env->me_lfd, rsize) != 0) goto fail_errno; -#endif - } else { - rsize = size; - size = rsize - sizeof(MDB_txninfo); - env->me_maxreaders = size/sizeof(MDB_reader) + 1; - } - { -#ifdef _WIN32 - HANDLE mh; - mh = CreateFileMapping(env->me_lfd, NULL, PAGE_READWRITE, - 0, 0, NULL); - if (!mh) goto fail_errno; - env->me_txns = MapViewOfFileEx(mh, FILE_MAP_WRITE, 0, 0, rsize, NULL); - CloseHandle(mh); - if (!env->me_txns) goto fail_errno; -#else - void *m = mmap(NULL, rsize, PROT_READ|PROT_WRITE, MAP_SHARED, - env->me_lfd, 0); - if (m == MAP_FAILED) goto fail_errno; - env->me_txns = m; -#endif - } - if (*excl > 0) { -#ifdef _WIN32 - BY_HANDLE_FILE_INFORMATION stbuf; - struct { - DWORD volume; - DWORD nhigh; - DWORD nlow; - } idbuf; - - if (!mdb_sec_inited) { - InitializeSecurityDescriptor(&mdb_null_sd, - SECURITY_DESCRIPTOR_REVISION); - SetSecurityDescriptorDacl(&mdb_null_sd, TRUE, 0, FALSE); - mdb_all_sa.nLength = sizeof(SECURITY_ATTRIBUTES); - mdb_all_sa.bInheritHandle = FALSE; - mdb_all_sa.lpSecurityDescriptor = &mdb_null_sd; - mdb_sec_inited = 1; - } - if (!GetFileInformationByHandle(env->me_lfd, &stbuf)) goto fail_errno; - idbuf.volume = stbuf.dwVolumeSerialNumber; - idbuf.nhigh = stbuf.nFileIndexHigh; - idbuf.nlow = stbuf.nFileIndexLow; - env->me_txns->mti_mutexid = mdb_hash(&idbuf, sizeof(idbuf)); - mdb_env_mname_init(env); - env->me_rmutex = CreateMutexA(&mdb_all_sa, FALSE, MUTEXNAME(env, 'r')); - if (!env->me_rmutex) goto fail_errno; - env->me_wmutex = CreateMutexA(&mdb_all_sa, FALSE, MUTEXNAME(env, 'w')); - if (!env->me_wmutex) goto fail_errno; -#elif defined(MDB_USE_POSIX_SEM) - struct stat stbuf; - struct { - dev_t dev; - ino_t ino; - } idbuf; - -#if defined(__NetBSD__) -#define MDB_SHORT_SEMNAMES 1 /* limited to 14 chars */ -#endif - if (fstat(env->me_lfd, &stbuf)) goto fail_errno; - memset(&idbuf, 0, sizeof(idbuf)); - idbuf.dev = stbuf.st_dev; - idbuf.ino = stbuf.st_ino; - env->me_txns->mti_mutexid = mdb_hash(&idbuf, sizeof(idbuf)) -#ifdef MDB_SHORT_SEMNAMES - /* Max 9 base85-digits. We truncate here instead of in - * mdb_env_mname_init() to keep the latter portable. - */ - % ((mdb_hash_t)85*85*85*85*85*85*85*85*85) -#endif - ; - mdb_env_mname_init(env); - /* Clean up after a previous run, if needed: Try to - * remove both semaphores before doing anything else. 
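The lock region being set up here is just a file mapped MAP_SHARED into every participating process, sized as (maxreaders - 1) * sizeof(MDB_reader) + sizeof(MDB_txninfo) since MDB_txninfo already embeds the first reader slot. A minimal sketch of creating such a region (hypothetical map_lock_region; the real code keeps the descriptor open because it is also used for fcntl() locking):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical helper, not from mdb.c. */
    static void *map_lock_region(const char *path, size_t rsize)
    {
        int fd = open(path, O_RDWR|O_CREAT, 0644);
        void *m;
        if (fd < 0)
            return NULL;
        if (ftruncate(fd, rsize) != 0) {
            close(fd);
            return NULL;
        }
        m = mmap(NULL, rsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);      /* the mapping stays valid after close */
        return m == MAP_FAILED ? NULL : m;
    }
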
- */ - sem_unlink(MUTEXNAME(env, 'r')); - sem_unlink(MUTEXNAME(env, 'w')); - env->me_rmutex = sem_open(MUTEXNAME(env, 'r'), O_CREAT|O_EXCL, mode, 1); - if (env->me_rmutex == SEM_FAILED) goto fail_errno; - env->me_wmutex = sem_open(MUTEXNAME(env, 'w'), O_CREAT|O_EXCL, mode, 1); - if (env->me_wmutex == SEM_FAILED) goto fail_errno; -#elif defined(MDB_USE_SYSV_SEM) - unsigned short vals[2] = {1, 1}; - key_t key = ftok(fname->mn_val, 'M'); /* fname is lockfile path now */ - if (key == -1) - goto fail_errno; - semid = semget(key, 2, (mode & 0777) | IPC_CREAT); - if (semid < 0) - goto fail_errno; - semu.array = vals; - if (semctl(semid, 0, SETALL, semu) < 0) - goto fail_errno; - env->me_txns->mti_semid = semid; - env->me_txns->mti_rlocked = 0; - env->me_txns->mti_wlocked = 0; -#else /* MDB_USE_POSIX_MUTEX: */ - pthread_mutexattr_t mattr; - - /* Solaris needs this before initing a robust mutex. Otherwise - * it may skip the init and return EBUSY "seems someone already - * inited" or EINVAL "it was inited differently". - */ - memset(env->me_txns->mti_rmutex, 0, sizeof(*env->me_txns->mti_rmutex)); - memset(env->me_txns->mti_wmutex, 0, sizeof(*env->me_txns->mti_wmutex)); - - if ((rc = pthread_mutexattr_init(&mattr)) != 0) - goto fail; - rc = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED); -#ifdef MDB_ROBUST_SUPPORTED - if (!rc) rc = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST); -#endif - if (!rc) rc = pthread_mutex_init(env->me_txns->mti_rmutex, &mattr); - if (!rc) rc = pthread_mutex_init(env->me_txns->mti_wmutex, &mattr); - pthread_mutexattr_destroy(&mattr); - if (rc) - goto fail; -#endif /* _WIN32 || ... */ - - env->me_txns->mti_magic = MDB_MAGIC; - env->me_txns->mti_format = MDB_LOCK_FORMAT; - env->me_txns->mti_txnid = 0; - env->me_txns->mti_numreaders = 0; - - } else { -#ifdef MDB_USE_SYSV_SEM - struct semid_ds buf; -#endif - if (env->me_txns->mti_magic != MDB_MAGIC) { - DPUTS("lock region has invalid magic"); - rc = MDB_INVALID; - goto fail; - } - if (env->me_txns->mti_format != MDB_LOCK_FORMAT) { - DPRINTF(("lock region has format+version 0x%x, expected 0x%x", - env->me_txns->mti_format, MDB_LOCK_FORMAT)); - rc = MDB_VERSION_MISMATCH; - goto fail; - } - rc = ErrCode(); - if (rc && rc != EACCES && rc != EAGAIN) { - goto fail; - } -#ifdef _WIN32 - mdb_env_mname_init(env); - env->me_rmutex = OpenMutexA(SYNCHRONIZE, FALSE, MUTEXNAME(env, 'r')); - if (!env->me_rmutex) goto fail_errno; - env->me_wmutex = OpenMutexA(SYNCHRONIZE, FALSE, MUTEXNAME(env, 'w')); - if (!env->me_wmutex) goto fail_errno; -#elif defined(MDB_USE_POSIX_SEM) - mdb_env_mname_init(env); - env->me_rmutex = sem_open(MUTEXNAME(env, 'r'), 0); - if (env->me_rmutex == SEM_FAILED) goto fail_errno; - env->me_wmutex = sem_open(MUTEXNAME(env, 'w'), 0); - if (env->me_wmutex == SEM_FAILED) goto fail_errno; -#elif defined(MDB_USE_SYSV_SEM) - semid = env->me_txns->mti_semid; - semu.buf = &buf; - /* check for read access */ - if (semctl(semid, 0, IPC_STAT, semu) < 0) - goto fail_errno; - /* check for write access */ - if (semctl(semid, 0, IPC_SET, semu) < 0) - goto fail_errno; -#endif - } -#ifdef MDB_USE_SYSV_SEM - env->me_rmutex->semid = semid; - env->me_wmutex->semid = semid; - env->me_rmutex->semnum = 0; - env->me_wmutex->semnum = 1; - env->me_rmutex->locked = &env->me_txns->mti_rlocked; - env->me_wmutex->locked = &env->me_txns->mti_wlocked; -#endif - - return MDB_SUCCESS; - -fail_errno: - rc = ErrCode(); -fail: - return rc; -} - - /** Only a subset of the @ref mdb_env flags can be changed - * at runtime. 
Changing other flags requires closing the - * environment and re-opening it with the new flags. - */ -#define CHANGEABLE (MDB_NOSYNC|MDB_NOMETASYNC|MDB_MAPASYNC|MDB_NOMEMINIT) -#define CHANGELESS (MDB_FIXEDMAP|MDB_NOSUBDIR|MDB_RDONLY| \ - MDB_WRITEMAP|MDB_NOTLS|MDB_NOLOCK|MDB_NORDAHEAD|MDB_PREVMETA) - -#if VALID_FLAGS & PERSISTENT_FLAGS & (CHANGEABLE|CHANGELESS) -# error "Persistent DB flags & env flags overlap, but both go in mm_flags" -#endif - -int ESECT -mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode) -{ - int rc, excl = -1; - MDB_name fname; - - if (env->me_fd!=INVALID_HANDLE_VALUE || (flags & ~(CHANGEABLE|CHANGELESS))) - return EINVAL; - -#ifdef MDB_VL32 - if (flags & MDB_WRITEMAP) { - /* silently ignore WRITEMAP in 32 bit mode */ - flags ^= MDB_WRITEMAP; - } - if (flags & MDB_FIXEDMAP) { - /* cannot support FIXEDMAP */ - return EINVAL; - } -#endif - flags |= env->me_flags; - - rc = mdb_fname_init(path, flags, &fname); - if (rc) - return rc; - -#ifdef MDB_VL32 -#ifdef _WIN32 - env->me_rpmutex = CreateMutex(NULL, FALSE, NULL); - if (!env->me_rpmutex) { - rc = ErrCode(); - goto leave; - } -#else - rc = pthread_mutex_init(&env->me_rpmutex, NULL); - if (rc) - goto leave; -#endif -#endif - flags |= MDB_ENV_ACTIVE; /* tell mdb_env_close0() to clean up */ - - if (flags & MDB_RDONLY) { - /* silently ignore WRITEMAP when we're only getting read access */ - flags &= ~MDB_WRITEMAP; - } else { - if (!((env->me_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)) && - (env->me_dirty_list = calloc(MDB_IDL_UM_SIZE, sizeof(MDB_ID2))))) - rc = ENOMEM; - } - - env->me_flags = flags; - if (rc) - goto leave; - -#ifdef MDB_VL32 - { - env->me_rpages = malloc(MDB_ERPAGE_SIZE * sizeof(MDB_ID3)); - if (!env->me_rpages) { - rc = ENOMEM; - goto leave; - } - env->me_rpages[0].mid = 0; - env->me_rpcheck = MDB_ERPAGE_SIZE/2; - } -#endif - - env->me_path = strdup(path); - env->me_dbxs = calloc(env->me_maxdbs, sizeof(MDB_dbx)); - env->me_dbflags = calloc(env->me_maxdbs, sizeof(uint16_t)); - env->me_dbiseqs = calloc(env->me_maxdbs, sizeof(unsigned int)); - if (!(env->me_dbxs && env->me_path && env->me_dbflags && env->me_dbiseqs)) { - rc = ENOMEM; - goto leave; - } - env->me_dbxs[FREE_DBI].md_cmp = mdb_cmp_long; /* aligned MDB_INTEGERKEY */ - - /* For RDONLY, get lockfile after we know datafile exists */ - if (!(flags & (MDB_RDONLY|MDB_NOLOCK))) { - rc = mdb_env_setup_locks(env, &fname, mode, &excl); - if (rc) - goto leave; - } - - rc = mdb_fopen(env, &fname, - (flags & MDB_RDONLY) ? MDB_O_RDONLY : MDB_O_RDWR, - mode, &env->me_fd); - if (rc) - goto leave; - - if ((flags & (MDB_RDONLY|MDB_NOLOCK)) == MDB_RDONLY) { - rc = mdb_env_setup_locks(env, &fname, mode, &excl); - if (rc) - goto leave; - } - - if ((rc = mdb_env_open2(env, flags & MDB_PREVMETA)) == MDB_SUCCESS) { - if (!(flags & (MDB_RDONLY|MDB_WRITEMAP))) { - /* Synchronous fd for meta writes. Needed even with - * MDB_NOSYNC/MDB_NOMETASYNC, in case these get reset. 
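For contrast with these internals, the caller's half of environment setup goes through the public API only; a minimal sketch with hypothetical defaults (10 MiB map, 126 readers):

    #include "lmdb.h"

    /* Hypothetical helper: create and open an environment. */
    static int open_env(const char *path, MDB_env **out)
    {
        MDB_env *env;
        int rc;
        if ((rc = mdb_env_create(&env)) != 0)
            return rc;
        /* Both setters must run before mdb_env_open(). */
        mdb_env_set_maxreaders(env, 126);
        mdb_env_set_mapsize(env, 10485760);
        rc = mdb_env_open(env, path, 0, 0664);  /* no special flags */
        if (rc) {
            mdb_env_close(env); /* required even after a failed open */
            return rc;
        }
        *out = env;
        return 0;
    }
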
- */
- rc = mdb_fopen(env, &fname, MDB_O_META, mode, &env->me_mfd);
- if (rc)
- goto leave;
- }
- DPRINTF(("opened dbenv %p", (void *) env));
- if (excl > 0) {
- rc = mdb_env_share_locks(env, &excl);
- if (rc)
- goto leave;
- }
- if (!(flags & MDB_RDONLY)) {
- MDB_txn *txn;
- int tsize = sizeof(MDB_txn), size = tsize + env->me_maxdbs *
- (sizeof(MDB_db)+sizeof(MDB_cursor *)+sizeof(unsigned int)+1);
- if ((env->me_pbuf = calloc(1, env->me_psize)) &&
- (txn = calloc(1, size)))
- {
- txn->mt_dbs = (MDB_db *)((char *)txn + tsize);
- txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs);
- txn->mt_dbiseqs = (unsigned int *)(txn->mt_cursors + env->me_maxdbs);
- txn->mt_dbflags = (unsigned char *)(txn->mt_dbiseqs + env->me_maxdbs);
- txn->mt_env = env;
-#ifdef MDB_VL32
- txn->mt_rpages = malloc(MDB_TRPAGE_SIZE * sizeof(MDB_ID3));
- if (!txn->mt_rpages) {
- free(txn);
- rc = ENOMEM;
- goto leave;
- }
- txn->mt_rpages[0].mid = 0;
- txn->mt_rpcheck = MDB_TRPAGE_SIZE/2;
-#endif
- txn->mt_dbxs = env->me_dbxs;
- txn->mt_flags = MDB_TXN_FINISHED;
- env->me_txn0 = txn;
- } else {
- rc = ENOMEM;
- }
- }
- }
-
-leave:
- if (rc) {
- mdb_env_close0(env, excl);
- }
- mdb_fname_destroy(fname);
- return rc;
-}
-
-/** Destroy resources from mdb_env_open(), clear our readers & DBIs */
-static void ESECT
-mdb_env_close0(MDB_env *env, int excl)
-{
- int i;
-
- if (!(env->me_flags & MDB_ENV_ACTIVE))
- return;
-
- /* Doing this here since me_dbxs may not exist during mdb_env_close */
- if (env->me_dbxs) {
- for (i = env->me_maxdbs; --i >= CORE_DBS; )
- free(env->me_dbxs[i].md_name.mv_data);
- free(env->me_dbxs);
- }
-
- free(env->me_pbuf);
- free(env->me_dbiseqs);
- free(env->me_dbflags);
- free(env->me_path);
- free(env->me_dirty_list);
-#ifdef MDB_VL32
- if (env->me_txn0 && env->me_txn0->mt_rpages)
- free(env->me_txn0->mt_rpages);
- if (env->me_rpages) {
- MDB_ID3L el = env->me_rpages;
- unsigned int x;
- for (x=1; x<=el[0].mid; x++)
- munmap(el[x].mptr, el[x].mcnt * env->me_psize);
- free(el);
- }
-#endif
- free(env->me_txn0);
- mdb_midl_free(env->me_free_pgs);
-
- if (env->me_flags & MDB_ENV_TXKEY) {
- pthread_key_delete(env->me_txkey);
-#ifdef _WIN32
- /* Delete our key from the global list */
- for (i=0; i<mdb_tls_nkeys; i++)
- if (mdb_tls_keys[i] == env->me_txkey) {
- mdb_tls_keys[i] = mdb_tls_keys[mdb_tls_nkeys-1];
- mdb_tls_nkeys--;
- break;
- }
-#endif
- }
-
- if (env->me_map) {
-#ifdef MDB_VL32
- munmap(env->me_map, NUM_METAS*env->me_psize);
-#else
- munmap(env->me_map, env->me_mapsize);
-#endif
- }
- if (env->me_mfd != INVALID_HANDLE_VALUE)
- (void) close(env->me_mfd);
- if (env->me_fd != INVALID_HANDLE_VALUE)
- (void) close(env->me_fd);
- if (env->me_txns) {
- MDB_PID_T pid = getpid();
- /* Clearing readers is done in this function because
- * me_txkey with its destructor must be disabled first.
- *
- * We skip the the reader mutex, so we touch only
- * data owned by this process (me_close_readers and
- * our readers), and clear each reader atomically.
- */
- for (i = env->me_close_readers; --i >= 0; )
- if (env->me_txns->mti_readers[i].mr_pid == pid)
- env->me_txns->mti_readers[i].mr_pid = 0;
-#ifdef _WIN32
- if (env->me_rmutex) {
- CloseHandle(env->me_rmutex);
- if (env->me_wmutex) CloseHandle(env->me_wmutex);
- }
- /* Windows automatically destroys the mutexes when
- * the last handle closes.
- */ -#elif defined(MDB_USE_POSIX_SEM) - if (env->me_rmutex != SEM_FAILED) { - sem_close(env->me_rmutex); - if (env->me_wmutex != SEM_FAILED) - sem_close(env->me_wmutex); - /* If we have the filelock: If we are the - * only remaining user, clean up semaphores. - */ - if (excl == 0) - mdb_env_excl_lock(env, &excl); - if (excl > 0) { - sem_unlink(MUTEXNAME(env, 'r')); - sem_unlink(MUTEXNAME(env, 'w')); - } - } -#elif defined(MDB_USE_SYSV_SEM) - if (env->me_rmutex->semid != -1) { - /* If we have the filelock: If we are the - * only remaining user, clean up semaphores. - */ - if (excl == 0) - mdb_env_excl_lock(env, &excl); - if (excl > 0) - semctl(env->me_rmutex->semid, 0, IPC_RMID); - } -#endif - munmap((void *)env->me_txns, (env->me_maxreaders-1)*sizeof(MDB_reader)+sizeof(MDB_txninfo)); - } - if (env->me_lfd != INVALID_HANDLE_VALUE) { -#ifdef _WIN32 - if (excl >= 0) { - /* Unlock the lockfile. Windows would have unlocked it - * after closing anyway, but not necessarily at once. - */ - UnlockFile(env->me_lfd, 0, 0, 1, 0); - } -#endif - (void) close(env->me_lfd); - } -#ifdef MDB_VL32 -#ifdef _WIN32 - if (env->me_fmh) CloseHandle(env->me_fmh); - if (env->me_rpmutex) CloseHandle(env->me_rpmutex); -#else - pthread_mutex_destroy(&env->me_rpmutex); -#endif -#endif - - env->me_flags &= ~(MDB_ENV_ACTIVE|MDB_ENV_TXKEY); -} - -void ESECT -mdb_env_close(MDB_env *env) -{ - MDB_page *dp; - - if (env == NULL) - return; - - VGMEMP_DESTROY(env); - while ((dp = env->me_dpages) != NULL) { - VGMEMP_DEFINED(&dp->mp_next, sizeof(dp->mp_next)); - env->me_dpages = dp->mp_next; - free(dp); - } - - mdb_env_close0(env, 0); - free(env); -} - -/** Compare two items pointing at aligned #mdb_size_t's */ -static int -mdb_cmp_long(const MDB_val *a, const MDB_val *b) -{ - return (*(mdb_size_t *)a->mv_data < *(mdb_size_t *)b->mv_data) ? -1 : - *(mdb_size_t *)a->mv_data > *(mdb_size_t *)b->mv_data; -} - -/** Compare two items pointing at aligned unsigned int's. - * - * This is also set as #MDB_INTEGERDUP|#MDB_DUPFIXED's #MDB_dbx.%md_dcmp, - * but #mdb_cmp_clong() is called instead if the data type is #mdb_size_t. - */ -static int -mdb_cmp_int(const MDB_val *a, const MDB_val *b) -{ - return (*(unsigned int *)a->mv_data < *(unsigned int *)b->mv_data) ? -1 : - *(unsigned int *)a->mv_data > *(unsigned int *)b->mv_data; -} - -/** Compare two items pointing at unsigned ints of unknown alignment. - * Nodes and keys are guaranteed to be 2-byte aligned. - */ -static int -mdb_cmp_cint(const MDB_val *a, const MDB_val *b) -{ -#if BYTE_ORDER == LITTLE_ENDIAN - unsigned short *u, *c; - int x; - - u = (unsigned short *) ((char *) a->mv_data + a->mv_size); - c = (unsigned short *) ((char *) b->mv_data + a->mv_size); - do { - x = *--u - *--c; - } while(!x && u > (unsigned short *)a->mv_data); - return x; -#else - unsigned short *u, *c, *end; - int x; - - end = (unsigned short *) ((char *) a->mv_data + a->mv_size); - u = (unsigned short *)a->mv_data; - c = (unsigned short *)b->mv_data; - do { - x = *u++ - *c++; - } while(!x && u < end); - return x; -#endif -} - -/** Compare two items lexically */ -static int -mdb_cmp_memn(const MDB_val *a, const MDB_val *b) -{ - int diff; - ssize_t len_diff; - unsigned int len; - - len = a->mv_size; - len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size; - if (len_diff > 0) { - len = b->mv_size; - len_diff = 1; - } - - diff = memcmp(a->mv_data, b->mv_data, len); - return diff ? diff : len_diff<0 ? 
-1 : len_diff; -} - -/** Compare two items in reverse byte order */ -static int -mdb_cmp_memnr(const MDB_val *a, const MDB_val *b) -{ - const unsigned char *p1, *p2, *p1_lim; - ssize_t len_diff; - int diff; - - p1_lim = (const unsigned char *)a->mv_data; - p1 = (const unsigned char *)a->mv_data + a->mv_size; - p2 = (const unsigned char *)b->mv_data + b->mv_size; - - len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size; - if (len_diff > 0) { - p1_lim += len_diff; - len_diff = 1; - } - - while (p1 > p1_lim) { - diff = *--p1 - *--p2; - if (diff) - return diff; - } - return len_diff<0 ? -1 : len_diff; -} - -/** Search for key within a page, using binary search. - * Returns the smallest entry larger or equal to the key. - * If exactp is non-null, stores whether the found entry was an exact match - * in *exactp (1 or 0). - * Updates the cursor index with the index of the found entry. - * If no entry larger or equal to the key is found, returns NULL. - */ -static MDB_node * -mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp) -{ - unsigned int i = 0, nkeys; - int low, high; - int rc = 0; - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_node *node = NULL; - MDB_val nodekey; - MDB_cmp_func *cmp; - DKBUF; - - nkeys = NUMKEYS(mp); - - DPRINTF(("searching %u keys in %s %spage %"Yu, - nkeys, IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "", - mdb_dbg_pgno(mp))); - - low = IS_LEAF(mp) ? 0 : 1; - high = nkeys - 1; - cmp = mc->mc_dbx->md_cmp; - - /* Branch pages have no data, so if using integer keys, - * alignment is guaranteed. Use faster mdb_cmp_int. - */ - if (cmp == mdb_cmp_cint && IS_BRANCH(mp)) { - if (NODEPTR(mp, 1)->mn_ksize == sizeof(mdb_size_t)) - cmp = mdb_cmp_long; - else - cmp = mdb_cmp_int; - } - - if (IS_LEAF2(mp)) { - nodekey.mv_size = mc->mc_db->md_pad; - node = NODEPTR(mp, 0); /* fake */ - while (low <= high) { - i = (low + high) >> 1; - nodekey.mv_data = LEAF2KEY(mp, i, nodekey.mv_size); - rc = cmp(key, &nodekey); - DPRINTF(("found leaf index %u [%s], rc = %i", - i, DKEY(&nodekey), rc)); - if (rc == 0) - break; - if (rc > 0) - low = i + 1; - else - high = i - 1; - } - } else { - while (low <= high) { - i = (low + high) >> 1; - - node = NODEPTR(mp, i); - nodekey.mv_size = NODEKSZ(node); - nodekey.mv_data = NODEKEY(node); - - rc = cmp(key, &nodekey); -#if MDB_DEBUG - if (IS_LEAF(mp)) - DPRINTF(("found leaf index %u [%s], rc = %i", - i, DKEY(&nodekey), rc)); - else - DPRINTF(("found branch index %u [%s -> %"Yu"], rc = %i", - i, DKEY(&nodekey), NODEPGNO(node), rc)); -#endif - if (rc == 0) - break; - if (rc > 0) - low = i + 1; - else - high = i - 1; - } - } - - if (rc > 0) { /* Found entry is less than the key. */ - i++; /* Skip to get the smallest entry larger than key. */ - if (!IS_LEAF2(mp)) - node = NODEPTR(mp, i); - } - if (exactp) - *exactp = (rc == 0 && nkeys > 0); - /* store the key index */ - mc->mc_ki[mc->mc_top] = i; - if (i >= nkeys) - /* There is no entry larger or equal to the key. */ - return NULL; - - /* nodeptr is fake for LEAF2 */ - return node; -} - -#if 0 -static void -mdb_cursor_adjust(MDB_cursor *mc, func) -{ - MDB_cursor *m2; - - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2->mc_pg[m2->mc_top] == mc->mc_pg[mc->mc_top]) { - func(mc, m2); - } - } -} -#endif - -/** Pop a page off the top of the cursor's stack. 
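mdb_node_search() above is a lower-bound binary search: it returns the smallest entry greater than or equal to the key, reporting exact matches separately. The same shape over a plain sorted array (hypothetical lower_bound, not part of mdb.c):

    /* Hypothetical stand-alone analogue of mdb_node_search(): index of
     * the smallest element >= key in a sorted array, or n if none. */
    static int lower_bound(const int *a, int n, int key, int *exact)
    {
        int low = 0, high = n - 1, i = 0, rc = 1;
        if (n == 0) {
            if (exact) *exact = 0;
            return 0;
        }
        while (low <= high) {
            i = (low + high) >> 1;
            rc = (key > a[i]) - (key < a[i]);
            if (rc == 0)
                break;
            if (rc > 0)
                low = i + 1;
            else
                high = i - 1;
        }
        if (rc > 0)
            i++;    /* step past the last element smaller than key */
        if (exact)
            *exact = (rc == 0);
        return i;
    }
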
*/ -static void -mdb_cursor_pop(MDB_cursor *mc) -{ - if (mc->mc_snum) { - DPRINTF(("popping page %"Yu" off db %d cursor %p", - mc->mc_pg[mc->mc_top]->mp_pgno, DDBI(mc), (void *) mc)); - - mc->mc_snum--; - if (mc->mc_snum) { - mc->mc_top--; - } else { - mc->mc_flags &= ~C_INITIALIZED; - } - } -} - -/** Push a page onto the top of the cursor's stack. - * Set #MDB_TXN_ERROR on failure. - */ -static int -mdb_cursor_push(MDB_cursor *mc, MDB_page *mp) -{ - DPRINTF(("pushing page %"Yu" on db %d cursor %p", mp->mp_pgno, - DDBI(mc), (void *) mc)); - - if (mc->mc_snum >= CURSOR_STACK) { - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CURSOR_FULL; - } - - mc->mc_top = mc->mc_snum++; - mc->mc_pg[mc->mc_top] = mp; - mc->mc_ki[mc->mc_top] = 0; - - return MDB_SUCCESS; -} - -#ifdef MDB_VL32 -/** Map a read-only page. - * There are two levels of tracking in use, a per-txn list and a per-env list. - * ref'ing and unref'ing the per-txn list is faster since it requires no - * locking. Pages are cached in the per-env list for global reuse, and a lock - * is required. Pages are not immediately unmapped when their refcnt goes to - * zero; they hang around in case they will be reused again soon. - * - * When the per-txn list gets full, all pages with refcnt=0 are purged from the - * list and their refcnts in the per-env list are decremented. - * - * When the per-env list gets full, all pages with refcnt=0 are purged from the - * list and their pages are unmapped. - * - * @note "full" means the list has reached its respective rpcheck threshold. - * This threshold slowly raises if no pages could be purged on a given check, - * and returns to its original value when enough pages were purged. - * - * If purging doesn't free any slots, filling the per-txn list will return - * MDB_TXN_FULL, and filling the per-env list returns MDB_MAP_FULL. - * - * Reference tracking in a txn is imperfect, pages can linger with non-zero - * refcnt even without active references. It was deemed to be too invasive - * to add unrefs in every required location. However, all pages are unref'd - * at the end of the transaction. This guarantees that no stale references - * linger in the per-env list. - * - * Usually we map chunks of 16 pages at a time, but if an overflow page begins - * at the tail of the chunk we extend the chunk to include the entire overflow - * page. Unfortunately, pages can be turned into overflow pages after their - * chunk was already mapped. In that case we must remap the chunk if the - * overflow page is referenced. If the chunk's refcnt is 0 we can just remap - * it, otherwise we temporarily map a new chunk just for the overflow page. - * - * @note this chunk handling means we cannot guarantee that a data item - * returned from the DB will stay alive for the duration of the transaction: - * We unref pages as soon as a cursor moves away from the page - * A subsequent op may cause a purge, which may unmap any unref'd chunks - * The caller must copy the data if it must be used later in the same txn. - * - * Also - our reference counting revolves around cursors, but overflow pages - * aren't pointed to by a cursor's page stack. We have to remember them - * explicitly, in the added mc_ovpg field. A single cursor can only hold a - * reference to one overflow page at a time. - * - * @param[in] txn the transaction for this access. - * @param[in] pgno the page number for the page to retrieve. - * @param[out] ret address of a pointer where the page's address will be stored. - * @return 0 on success, non-zero on failure. 
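- * - * [Editor's note, an illustrative calculation that is not part of the original source: with the usual 16-page chunks, a request for page 35 works out as rem = 35 & 15 = 3 and pgno = 35 ^ 3 = 32, so the chunk starting at page 32 is mapped and the caller receives a pointer 3 pages into it.]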
- */ -static int -mdb_rpage_get(MDB_txn *txn, pgno_t pg0, MDB_page **ret) -{ - MDB_env *env = txn->mt_env; - MDB_page *p; - MDB_ID3L tl = txn->mt_rpages; - MDB_ID3L el = env->me_rpages; - MDB_ID3 id3; - unsigned x, rem; - pgno_t pgno; - int rc, retries = 1; -#ifdef _WIN32 - LARGE_INTEGER off; - SIZE_T len; -#define SET_OFF(off,val) off.QuadPart = val -#define MAP(rc,env,addr,len,off) \ - addr = NULL; \ - rc = NtMapViewOfSection(env->me_fmh, GetCurrentProcess(), &addr, 0, \ - len, &off, &len, ViewUnmap, (env->me_flags & MDB_RDONLY) ? 0 : MEM_RESERVE, PAGE_READONLY); \ - if (rc) rc = mdb_nt2win32(rc) -#else - off_t off; - size_t len; -#define SET_OFF(off,val) off = val -#define MAP(rc,env,addr,len,off) \ - addr = mmap(NULL, len, PROT_READ, MAP_SHARED, env->me_fd, off); \ - rc = (addr == MAP_FAILED) ? errno : 0 -#endif - - /* remember the offset of the actual page number, so we can - * return the correct pointer at the end. - */ - rem = pg0 & (MDB_RPAGE_CHUNK-1); - pgno = pg0 ^ rem; - - id3.mid = 0; - x = mdb_mid3l_search(tl, pgno); - if (x <= tl[0].mid && tl[x].mid == pgno) { - if (x != tl[0].mid && tl[x+1].mid == pg0) - x++; - /* check for overflow size */ - p = (MDB_page *)((char *)tl[x].mptr + rem * env->me_psize); - if (IS_OVERFLOW(p) && p->mp_pages + rem > tl[x].mcnt) { - id3.mcnt = p->mp_pages + rem; - len = id3.mcnt * env->me_psize; - SET_OFF(off, pgno * env->me_psize); - MAP(rc, env, id3.mptr, len, off); - if (rc) - return rc; - /* check for local-only page */ - if (rem) { - mdb_tassert(txn, tl[x].mid != pg0); - /* hope there's room to insert this locally. - * setting mid here tells later code to just insert - * this id3 instead of searching for a match. - */ - id3.mid = pg0; - goto notlocal; - } else { - /* ignore the mapping we got from env, use new one */ - tl[x].mptr = id3.mptr; - tl[x].mcnt = id3.mcnt; - /* if no active ref, see if we can replace in env */ - if (!tl[x].mref) { - unsigned i; - pthread_mutex_lock(&env->me_rpmutex); - i = mdb_mid3l_search(el, tl[x].mid); - if (el[i].mref == 1) { - /* just us, replace it */ - munmap(el[i].mptr, el[i].mcnt * env->me_psize); - el[i].mptr = tl[x].mptr; - el[i].mcnt = tl[x].mcnt; - } else { - /* there are others, remove ourself */ - el[i].mref--; - } - pthread_mutex_unlock(&env->me_rpmutex); - } - } - } - id3.mptr = tl[x].mptr; - id3.mcnt = tl[x].mcnt; - tl[x].mref++; - goto ok; - } - -notlocal: - if (tl[0].mid >= MDB_TRPAGE_MAX - txn->mt_rpcheck) { - unsigned i, y; - /* purge unref'd pages from our list and unref in env */ - pthread_mutex_lock(&env->me_rpmutex); -retry: - y = 0; - for (i=1; i<=tl[0].mid; i++) { - if (!tl[i].mref) { - if (!y) y = i; - /* tmp overflow pages don't go to env */ - if (tl[i].mid & (MDB_RPAGE_CHUNK-1)) { - munmap(tl[i].mptr, tl[i].mcnt * env->me_psize); - continue; - } - x = mdb_mid3l_search(el, tl[i].mid); - el[x].mref--; - } - } - pthread_mutex_unlock(&env->me_rpmutex); - if (!y) { - /* we didn't find any unref'd chunks. - * if we're out of room, fail. - */ - if (tl[0].mid >= MDB_TRPAGE_MAX) - return MDB_TXN_FULL; - /* otherwise, raise threshold for next time around - * and let this go. 
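- * [Editor's note, illustrative: halving mt_rpcheck moves the trigger point MDB_TRPAGE_MAX - mt_rpcheck upward, so the next purge attempt comes later; once a purge succeeds, the consolidation branch below doubles mt_rpcheck back toward its original value.]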
- */ - txn->mt_rpcheck /= 2; - } else { - /* we found some unused; consolidate the list */ - for (i=y+1; i<= tl[0].mid; i++) - if (tl[i].mref) - tl[y++] = tl[i]; - tl[0].mid = y-1; - /* decrease the check threshold toward its original value */ - if (!txn->mt_rpcheck) - txn->mt_rpcheck = 1; - while (txn->mt_rpcheck < tl[0].mid && txn->mt_rpcheck < MDB_TRPAGE_SIZE/2) - txn->mt_rpcheck *= 2; - } - } - if (tl[0].mid < MDB_TRPAGE_SIZE) { - id3.mref = 1; - if (id3.mid) - goto found; - /* don't map past last written page in read-only envs */ - if ((env->me_flags & MDB_RDONLY) && pgno + MDB_RPAGE_CHUNK-1 > txn->mt_last_pgno) - id3.mcnt = txn->mt_last_pgno + 1 - pgno; - else - id3.mcnt = MDB_RPAGE_CHUNK; - len = id3.mcnt * env->me_psize; - id3.mid = pgno; - - /* search for page in env */ - pthread_mutex_lock(&env->me_rpmutex); - x = mdb_mid3l_search(el, pgno); - if (x <= el[0].mid && el[x].mid == pgno) { - id3.mptr = el[x].mptr; - id3.mcnt = el[x].mcnt; - /* check for overflow size */ - p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize); - if (IS_OVERFLOW(p) && p->mp_pages + rem > id3.mcnt) { - id3.mcnt = p->mp_pages + rem; - len = id3.mcnt * env->me_psize; - SET_OFF(off, pgno * env->me_psize); - MAP(rc, env, id3.mptr, len, off); - if (rc) - goto fail; - if (!el[x].mref) { - munmap(el[x].mptr, env->me_psize * el[x].mcnt); - el[x].mptr = id3.mptr; - el[x].mcnt = id3.mcnt; - } else { - id3.mid = pg0; - pthread_mutex_unlock(&env->me_rpmutex); - goto found; - } - } - el[x].mref++; - pthread_mutex_unlock(&env->me_rpmutex); - goto found; - } - if (el[0].mid >= MDB_ERPAGE_MAX - env->me_rpcheck) { - /* purge unref'd pages */ - unsigned i, y = 0; - for (i=1; i<=el[0].mid; i++) { - if (!el[i].mref) { - if (!y) y = i; - munmap(el[i].mptr, env->me_psize * el[i].mcnt); - } - } - if (!y) { - if (retries) { - /* see if we can unref some local pages */ - retries--; - id3.mid = 0; - goto retry; - } - if (el[0].mid >= MDB_ERPAGE_MAX) { - pthread_mutex_unlock(&env->me_rpmutex); - return MDB_MAP_FULL; - } - env->me_rpcheck /= 2; - } else { - for (i=y+1; i<= el[0].mid; i++) - if (el[i].mref) - el[y++] = el[i]; - el[0].mid = y-1; - if (!env->me_rpcheck) - env->me_rpcheck = 1; - while (env->me_rpcheck < el[0].mid && env->me_rpcheck < MDB_ERPAGE_SIZE/2) - env->me_rpcheck *= 2; - } - } - SET_OFF(off, pgno * env->me_psize); - MAP(rc, env, id3.mptr, len, off); - if (rc) { -fail: - pthread_mutex_unlock(&env->me_rpmutex); - return rc; - } - /* check for overflow size */ - p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize); - if (IS_OVERFLOW(p) && p->mp_pages + rem > id3.mcnt) { - id3.mcnt = p->mp_pages + rem; - munmap(id3.mptr, len); - len = id3.mcnt * env->me_psize; - MAP(rc, env, id3.mptr, len, off); - if (rc) - goto fail; - } - mdb_mid3l_insert(el, &id3); - pthread_mutex_unlock(&env->me_rpmutex); -found: - mdb_mid3l_insert(tl, &id3); - } else { - return MDB_TXN_FULL; - } -ok: - p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize); -#if MDB_DEBUG /* we don't need this check any more */ - if (IS_OVERFLOW(p)) { - mdb_tassert(txn, p->mp_pages + rem <= id3.mcnt); - } -#endif - *ret = p; - return MDB_SUCCESS; -} -#endif - -/** Find the address of the page corresponding to a given page number. - * Set #MDB_TXN_ERROR on failure. - * @param[in] mc the cursor accessing the page. - * @param[in] pgno the page number for the page to retrieve. - * @param[out] ret address of a pointer where the page's address will be stored. - * @param[out] lvl dirty_list inheritance level of found page. 1=current txn, 0=mapped page. 
- * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_get(MDB_cursor *mc, pgno_t pgno, MDB_page **ret, int *lvl) -{ - MDB_txn *txn = mc->mc_txn; - MDB_page *p = NULL; - int level; - - if (! (mc->mc_flags & (C_ORIG_RDONLY|C_WRITEMAP))) { - MDB_txn *tx2 = txn; - level = 1; - do { - MDB_ID2L dl = tx2->mt_u.dirty_list; - unsigned x; - /* Spilled pages were dirtied in this txn and flushed - * because the dirty list got full. Bring this page - * back in from the map (but don't unspill it here, - * leave that unless page_touch happens again). - */ - if (tx2->mt_spill_pgs) { - MDB_ID pn = pgno << 1; - x = mdb_midl_search(tx2->mt_spill_pgs, pn); - if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) { - goto mapped; - } - } - if (dl[0].mid) { - unsigned x = mdb_mid2l_search(dl, pgno); - if (x <= dl[0].mid && dl[x].mid == pgno) { - p = dl[x].mptr; - goto done; - } - } - level++; - } while ((tx2 = tx2->mt_parent) != NULL); - } - - if (pgno >= txn->mt_next_pgno) { - DPRINTF(("page %"Yu" not found", pgno)); - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PAGE_NOTFOUND; - } - - level = 0; - -mapped: - { -#ifdef MDB_VL32 - int rc = mdb_rpage_get(txn, pgno, &p); - if (rc) { - txn->mt_flags |= MDB_TXN_ERROR; - return rc; - } -#else - MDB_env *env = txn->mt_env; - p = (MDB_page *)(env->me_map + env->me_psize * pgno); -#endif - } - -done: - *ret = p; - if (lvl) - *lvl = level; - return MDB_SUCCESS; -} - -/** Finish #mdb_page_search() / #mdb_page_search_lowest(). - * The cursor is at the root page, set up the rest of it. - */ -static int -mdb_page_search_root(MDB_cursor *mc, MDB_val *key, int flags) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - int rc; - DKBUF; - - while (IS_BRANCH(mp)) { - MDB_node *node; - indx_t i; - - DPRINTF(("branch page %"Yu" has %u keys", mp->mp_pgno, NUMKEYS(mp))); - /* Don't assert on branch pages in the FreeDB. We can get here - * while in the process of rebalancing a FreeDB branch page; we must - * let that proceed. ITS#8336 - */ - mdb_cassert(mc, !mc->mc_dbi || NUMKEYS(mp) > 1); - DPRINTF(("found index 0 to page %"Yu, NODEPGNO(NODEPTR(mp, 0)))); - - if (flags & (MDB_PS_FIRST|MDB_PS_LAST)) { - i = 0; - if (flags & MDB_PS_LAST) { - i = NUMKEYS(mp) - 1; - /* if already init'd, see if we're already in right place */ - if (mc->mc_flags & C_INITIALIZED) { - if (mc->mc_ki[mc->mc_top] == i) { - mc->mc_top = mc->mc_snum++; - mp = mc->mc_pg[mc->mc_top]; - goto ready; - } - } - } - } else { - int exact; - node = mdb_node_search(mc, key, &exact); - if (node == NULL) - i = NUMKEYS(mp) - 1; - else { - i = mc->mc_ki[mc->mc_top]; - if (!exact) { - mdb_cassert(mc, i > 0); - i--; - } - } - DPRINTF(("following index %u for key [%s]", i, DKEY(key))); - } - - mdb_cassert(mc, i < NUMKEYS(mp)); - node = NODEPTR(mp, i); - - if ((rc = mdb_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0) - return rc; - - mc->mc_ki[mc->mc_top] = i; - if ((rc = mdb_cursor_push(mc, mp))) - return rc; - -ready: - if (flags & MDB_PS_MODIFY) { - if ((rc = mdb_page_touch(mc)) != 0) - return rc; - mp = mc->mc_pg[mc->mc_top]; - } - } - - if (!IS_LEAF(mp)) { - DPRINTF(("internal error, index points to a %02X page!?", - mp->mp_flags)); - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CORRUPTED; - } - - DPRINTF(("found leaf page %"Yu" for key [%s]", mp->mp_pgno, - key ? DKEY(key) : "null")); - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - return MDB_SUCCESS; -} - -/** Search for the lowest key under the current branch page. 
- * This just bypasses a NUMKEYS check in the current page - * before calling mdb_page_search_root(), because the callers - * are all in situations where the current page is known to - * be underfilled. - */ -static int -mdb_page_search_lowest(MDB_cursor *mc) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_node *node = NODEPTR(mp, 0); - int rc; - - if ((rc = mdb_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0) - return rc; - - mc->mc_ki[mc->mc_top] = 0; - if ((rc = mdb_cursor_push(mc, mp))) - return rc; - return mdb_page_search_root(mc, NULL, MDB_PS_FIRST); -} - -/** Search for the page a given key should be in. - * Push it and its parent pages on the cursor stack. - * @param[in,out] mc the cursor for this operation. - * @param[in] key the key to search for, or NULL for first/last page. - * @param[in] flags If MDB_PS_MODIFY is set, visited pages in the DB - * are touched (updated with new page numbers). - * If MDB_PS_FIRST or MDB_PS_LAST is set, find first or last leaf. - * This is used by #mdb_cursor_first() and #mdb_cursor_last(). - * If MDB_PS_ROOTONLY set, just fetch root node, no further lookups. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_search(MDB_cursor *mc, MDB_val *key, int flags) -{ - int rc; - pgno_t root; - - /* Make sure the txn is still viable, then find the root from - * the txn's db table and set it as the root of the cursor's stack. - */ - if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) { - DPUTS("transaction may not be used now"); - return MDB_BAD_TXN; - } else { - /* Make sure we're using an up-to-date root */ - if (*mc->mc_dbflag & DB_STALE) { - MDB_cursor mc2; - if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) - return MDB_BAD_DBI; - mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, NULL); - rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, 0); - if (rc) - return rc; - { - MDB_val data; - int exact = 0; - uint16_t flags; - MDB_node *leaf = mdb_node_search(&mc2, - &mc->mc_dbx->md_name, &exact); - if (!exact) - return MDB_NOTFOUND; - if ((leaf->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA) - return MDB_INCOMPATIBLE; /* not a named DB */ - rc = mdb_node_read(&mc2, leaf, &data); - if (rc) - return rc; - memcpy(&flags, ((char *) data.mv_data + offsetof(MDB_db, md_flags)), - sizeof(uint16_t)); - /* The txn may not know this DBI, or another process may - * have dropped and recreated the DB with other flags. - */ - if ((mc->mc_db->md_flags & PERSISTENT_FLAGS) != flags) - return MDB_INCOMPATIBLE; - memcpy(mc->mc_db, data.mv_data, sizeof(MDB_db)); - } - *mc->mc_dbflag &= ~DB_STALE; - } - root = mc->mc_db->md_root; - - if (root == P_INVALID) { /* Tree is empty. 
*/ - DPUTS("tree is empty"); - return MDB_NOTFOUND; - } - } - - mdb_cassert(mc, root > 1); - if (!mc->mc_pg[0] || mc->mc_pg[0]->mp_pgno != root) { -#ifdef MDB_VL32 - if (mc->mc_pg[0]) - MDB_PAGE_UNREF(mc->mc_txn, mc->mc_pg[0]); -#endif - if ((rc = mdb_page_get(mc, root, &mc->mc_pg[0], NULL)) != 0) - return rc; - } - -#ifdef MDB_VL32 - { - int i; - for (i=1; i<mc->mc_snum; i++) - MDB_PAGE_UNREF(mc->mc_txn, mc->mc_pg[i]); - } -#endif - mc->mc_snum = 1; - mc->mc_top = 0; - - DPRINTF(("db %d root page %"Yu" has flags 0x%X", - DDBI(mc), root, mc->mc_pg[0]->mp_flags)); - - if (flags & MDB_PS_MODIFY) { - if ((rc = mdb_page_touch(mc))) - return rc; - } - - if (flags & MDB_PS_ROOTONLY) - return MDB_SUCCESS; - - return mdb_page_search_root(mc, key, flags); -} - -static int -mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp) -{ - MDB_txn *txn = mc->mc_txn; - pgno_t pg = mp->mp_pgno; - unsigned x = 0, ovpages = mp->mp_pages; - MDB_env *env = txn->mt_env; - MDB_IDL sl = txn->mt_spill_pgs; - MDB_ID pn = pg << 1; - int rc; - - DPRINTF(("free ov page %"Yu" (%d)", pg, ovpages)); - /* If the page is dirty or on the spill list we just acquired it, - * so we should give it back to our current free list, if any. - * Otherwise put it onto the list of pages we freed in this txn. - * - * Won't create me_pghead: me_pglast must be inited along with it. - * Unsupported in nested txns: They would need to hide the page - * range in ancestor txns' dirty and spilled lists. - */ - if (env->me_pghead && - !txn->mt_parent && - ((mp->mp_flags & P_DIRTY) || - (sl && (x = mdb_midl_search(sl, pn)) <= sl[0] && sl[x] == pn))) - { - unsigned i, j; - pgno_t *mop; - MDB_ID2 *dl, ix, iy; - rc = mdb_midl_need(&env->me_pghead, ovpages); - if (rc) - return rc; - if (!(mp->mp_flags & P_DIRTY)) { - /* This page is no longer spilled */ - if (x == sl[0]) - sl[0]--; - else - sl[x] |= 1; - goto release; - } - /* Remove from dirty list */ - dl = txn->mt_u.dirty_list; - x = dl[0].mid--; - for (ix = dl[x]; ix.mptr != mp; ix = iy) { - if (x > 1) { - x--; - iy = dl[x]; - dl[x] = ix; - } else { - mdb_cassert(mc, x > 1); - j = ++(dl[0].mid); - dl[j] = ix; /* Unsorted. OK when MDB_TXN_ERROR. */ - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PROBLEM; - } - } - txn->mt_dirty_room++; - if (!(env->me_flags & MDB_WRITEMAP)) - mdb_dpage_free(env, mp); -release: - /* Insert in me_pghead */ - mop = env->me_pghead; - j = mop[0] + ovpages; - for (i = mop[0]; i && mop[i] < pg; i--) - mop[j--] = mop[i]; - while (j>i) - mop[j--] = pg++; - mop[0] += ovpages; - } else { - rc = mdb_midl_append_range(&txn->mt_free_pgs, pg, ovpages); - if (rc) - return rc; - } -#ifdef MDB_VL32 - if (mc->mc_ovpg == mp) - mc->mc_ovpg = NULL; -#endif - mc->mc_db->md_overflow_pages -= ovpages; - return 0; -} - -/** Return the data associated with a given node. - * @param[in] mc The cursor for this operation. - * @param[in] leaf The node being read. - * @param[out] data Updated to point to the node's data. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_node_read(MDB_cursor *mc, MDB_node *leaf, MDB_val *data) -{ - MDB_page *omp; /* overflow page */ - pgno_t pgno; - int rc; - - if (MC_OVPG(mc)) { - MDB_PAGE_UNREF(mc->mc_txn, MC_OVPG(mc)); - MC_SET_OVPG(mc, NULL); - } - if (!F_ISSET(leaf->mn_flags, F_BIGDATA)) { - data->mv_size = NODEDSZ(leaf); - data->mv_data = NODEDATA(leaf); - return MDB_SUCCESS; - } - - /* Read overflow data. 
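- * [Editor's note, illustrative: an F_BIGDATA node stores only the page number of the first overflow page; the value itself begins at METADATA(omp), just past the page header, and runs for the mv_size recorded in the node, which is what the code below reconstructs.]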
- */ - data->mv_size = NODEDSZ(leaf); - memcpy(&pgno, NODEDATA(leaf), sizeof(pgno)); - if ((rc = mdb_page_get(mc, pgno, &omp, NULL)) != 0) { - DPRINTF(("read overflow page %"Yu" failed", pgno)); - return rc; - } - data->mv_data = METADATA(omp); - MC_SET_OVPG(mc, omp); - - return MDB_SUCCESS; -} - -int -mdb_get(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data) -{ - MDB_cursor mc; - MDB_xcursor mx; - int exact = 0, rc; - DKBUF; - - DPRINTF(("===> get db %u key [%s]", dbi, DKEY(key))); - - if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - mdb_cursor_init(&mc, txn, dbi, &mx); - rc = mdb_cursor_set(&mc, key, data, MDB_SET, &exact); - /* unref all the pages when MDB_VL32 - caller must copy the data - * before doing anything else - */ - MDB_CURSOR_UNREF(&mc, 1); - return rc; -} - -/** Find a sibling for a page. - * Replaces the page at the top of the cursor's stack with the - * specified sibling, if one exists. - * @param[in] mc The cursor for this operation. - * @param[in] move_right Non-zero if the right sibling is requested, - * otherwise the left sibling. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_cursor_sibling(MDB_cursor *mc, int move_right) -{ - int rc; - MDB_node *indx; - MDB_page *mp; -#ifdef MDB_VL32 - MDB_page *op; -#endif - - if (mc->mc_snum < 2) { - return MDB_NOTFOUND; /* root has no siblings */ - } - -#ifdef MDB_VL32 - op = mc->mc_pg[mc->mc_top]; -#endif - mdb_cursor_pop(mc); - DPRINTF(("parent page is page %"Yu", index %u", - mc->mc_pg[mc->mc_top]->mp_pgno, mc->mc_ki[mc->mc_top])); - - if (move_right ? (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mc->mc_pg[mc->mc_top])) - : (mc->mc_ki[mc->mc_top] == 0)) { - DPRINTF(("no more keys left, moving to %s sibling", - move_right ? "right" : "left")); - if ((rc = mdb_cursor_sibling(mc, move_right)) != MDB_SUCCESS) { - /* undo cursor_pop before returning */ - mc->mc_top++; - mc->mc_snum++; - return rc; - } - } else { - if (move_right) - mc->mc_ki[mc->mc_top]++; - else - mc->mc_ki[mc->mc_top]--; - DPRINTF(("just moving to %s index key %u", - move_right ? "right" : "left", mc->mc_ki[mc->mc_top])); - } - mdb_cassert(mc, IS_BRANCH(mc->mc_pg[mc->mc_top])); - - MDB_PAGE_UNREF(mc->mc_txn, op); - - indx = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if ((rc = mdb_page_get(mc, NODEPGNO(indx), &mp, NULL)) != 0) { - /* mc will be inconsistent if caller does mc_snum++ as above */ - mc->mc_flags &= ~(C_INITIALIZED|C_EOF); - return rc; - } - - mdb_cursor_push(mc, mp); - if (!move_right) - mc->mc_ki[mc->mc_top] = NUMKEYS(mp)-1; - - return MDB_SUCCESS; -} - -/** Move the cursor to the next data item. 
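- * - * [Editor's sketch, not part of the original source: callers normally reach this helper through the public mdb_cursor_get(). A complete forward scan looks like: MDB_val k, v; int rc; while ((rc = mdb_cursor_get(cur, &k, &v, MDB_NEXT)) == 0) consume(&k, &v); where 'cur' is an open cursor, 'consume' is a hypothetical callback, and rc ends as MDB_NOTFOUND once the last item has been returned.]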
*/ -static int -mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op) -{ - MDB_page *mp; - MDB_node *leaf; - int rc; - - if ((mc->mc_flags & C_DEL && op == MDB_NEXT_DUP)) - return MDB_NOTFOUND; - - if (!(mc->mc_flags & C_INITIALIZED)) - return mdb_cursor_first(mc, key, data); - - mp = mc->mc_pg[mc->mc_top]; - - if (mc->mc_flags & C_EOF) { - if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mp)-1) - return MDB_NOTFOUND; - mc->mc_flags ^= C_EOF; - } - - if (mc->mc_db->md_flags & MDB_DUPSORT) { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_NEXT || op == MDB_NEXT_DUP) { - rc = mdb_cursor_next(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_NEXT); - if (op != MDB_NEXT || rc != MDB_NOTFOUND) { - if (rc == MDB_SUCCESS) - MDB_GET_KEY(leaf, key); - return rc; - } - } - else { - MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0); - } - } else { - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if (op == MDB_NEXT_DUP) - return MDB_NOTFOUND; - } - } - - DPRINTF(("cursor_next: top page is %"Yu" in cursor %p", - mdb_dbg_pgno(mp), (void *) mc)); - if (mc->mc_flags & C_DEL) { - mc->mc_flags ^= C_DEL; - goto skip; - } - - if (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mp)) { - DPUTS("=====> move to next sibling page"); - if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) { - mc->mc_flags |= C_EOF; - return rc; - } - mp = mc->mc_pg[mc->mc_top]; - DPRINTF(("next page is %"Yu", key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top])); - } else - mc->mc_ki[mc->mc_top]++; - -skip: - DPRINTF(("==> cursor points to page %"Yu" with %u keys, key index %u", - mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top])); - - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Move the cursor to the previous data item. 
*/ -static int -mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op) -{ - MDB_page *mp; - MDB_node *leaf; - int rc; - - if (!(mc->mc_flags & C_INITIALIZED)) { - rc = mdb_cursor_last(mc, key, data); - if (rc) - return rc; - mc->mc_ki[mc->mc_top]++; - } - - mp = mc->mc_pg[mc->mc_top]; - - if (mc->mc_db->md_flags & MDB_DUPSORT) { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_PREV || op == MDB_PREV_DUP) { - rc = mdb_cursor_prev(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_PREV); - if (op != MDB_PREV || rc != MDB_NOTFOUND) { - if (rc == MDB_SUCCESS) { - MDB_GET_KEY(leaf, key); - mc->mc_flags &= ~C_EOF; - } - return rc; - } - } - else { - MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0); - } - } else { - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if (op == MDB_PREV_DUP) - return MDB_NOTFOUND; - } - } - - DPRINTF(("cursor_prev: top page is %"Yu" in cursor %p", - mdb_dbg_pgno(mp), (void *) mc)); - - mc->mc_flags &= ~(C_EOF|C_DEL); - - if (mc->mc_ki[mc->mc_top] == 0) { - DPUTS("=====> move to prev sibling page"); - if ((rc = mdb_cursor_sibling(mc, 0)) != MDB_SUCCESS) { - return rc; - } - mp = mc->mc_pg[mc->mc_top]; - mc->mc_ki[mc->mc_top] = NUMKEYS(mp) - 1; - DPRINTF(("prev page is %"Yu", key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top])); - } else - mc->mc_ki[mc->mc_top]--; - - DPRINTF(("==> cursor points to page %"Yu" with %u keys, key index %u", - mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top])); - - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Set the cursor on a specific data item. */ -static int -mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, - MDB_cursor_op op, int *exactp) -{ - int rc; - MDB_page *mp; - MDB_node *leaf = NULL; - DKBUF; - - if (key->mv_size == 0) - return MDB_BAD_VALSIZE; - - if (mc->mc_xcursor) { - MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0); - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - } - - /* See if we're already on the right page */ - if (mc->mc_flags & C_INITIALIZED) { - MDB_val nodekey; - - mp = mc->mc_pg[mc->mc_top]; - if (!NUMKEYS(mp)) { - mc->mc_ki[mc->mc_top] = 0; - return MDB_NOTFOUND; - } - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_size = mc->mc_db->md_pad; - nodekey.mv_data = LEAF2KEY(mp, 0, nodekey.mv_size); - } else { - leaf = NODEPTR(mp, 0); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* Probably happens rarely, but first node on the page - * was the one we wanted. 
- */ - mc->mc_ki[mc->mc_top] = 0; - if (exactp) - *exactp = 1; - goto set1; - } - if (rc > 0) { - unsigned int i; - unsigned int nkeys = NUMKEYS(mp); - if (nkeys > 1) { - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_data = LEAF2KEY(mp, - nkeys-1, nodekey.mv_size); - } else { - leaf = NODEPTR(mp, nkeys-1); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* last node was the one we wanted */ - mc->mc_ki[mc->mc_top] = nkeys-1; - if (exactp) - *exactp = 1; - goto set1; - } - if (rc < 0) { - if (mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) { - /* This is definitely the right page, skip search_page */ - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_data = LEAF2KEY(mp, - mc->mc_ki[mc->mc_top], nodekey.mv_size); - } else { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* current node was the one we wanted */ - if (exactp) - *exactp = 1; - goto set1; - } - } - rc = 0; - mc->mc_flags &= ~C_EOF; - goto set2; - } - } - /* If any parents have right-sibs, search. - * Otherwise, there's nothing further. - */ - for (i=0; i<mc->mc_top; i++) - if (mc->mc_ki[i] < - NUMKEYS(mc->mc_pg[i])-1) - break; - if (i == mc->mc_top) { - /* There are no other pages */ - mc->mc_ki[mc->mc_top] = nkeys; - return MDB_NOTFOUND; - } - } - if (!mc->mc_top) { - /* There are no other pages */ - mc->mc_ki[mc->mc_top] = 0; - if (op == MDB_SET_RANGE && !exactp) { - rc = 0; - goto set1; - } else - return MDB_NOTFOUND; - } - } else { - mc->mc_pg[0] = 0; - } - - rc = mdb_page_search(mc, key, 0); - if (rc != MDB_SUCCESS) - return rc; - - mp = mc->mc_pg[mc->mc_top]; - mdb_cassert(mc, IS_LEAF(mp)); - -set2: - leaf = mdb_node_search(mc, key, exactp); - if (exactp != NULL && !*exactp) { - /* MDB_SET specified and not an exact match. 
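- * [Editor's note, illustrative: MDB_SET and MDB_SET_KEY pass a non-NULL exactp, so an inexact match fails here with MDB_NOTFOUND, while MDB_SET_RANGE passes exactp == NULL and instead accepts the smallest key greater than or equal to the one requested.]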
*/ - return MDB_NOTFOUND; - } - - if (leaf == NULL) { - DPUTS("===> inexact leaf not found, goto sibling"); - if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) { - mc->mc_flags |= C_EOF; - return rc; /* no entries matched */ - } - mp = mc->mc_pg[mc->mc_top]; - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, 0); - } - -set1: - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - if (IS_LEAF2(mp)) { - if (op == MDB_SET_RANGE || op == MDB_SET_KEY) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - } - return MDB_SUCCESS; - } - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_SET || op == MDB_SET_KEY || op == MDB_SET_RANGE) { - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - } else { - int ex2, *ex2p; - if (op == MDB_GET_BOTH) { - ex2p = &ex2; - ex2 = 0; - } else { - ex2p = NULL; - } - rc = mdb_cursor_set(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_SET_RANGE, ex2p); - if (rc != MDB_SUCCESS) - return rc; - } - } else if (op == MDB_GET_BOTH || op == MDB_GET_BOTH_RANGE) { - MDB_val olddata; - MDB_cmp_func *dcmp; - if ((rc = mdb_node_read(mc, leaf, &olddata)) != MDB_SUCCESS) - return rc; - dcmp = mc->mc_dbx->md_dcmp; - if (NEED_CMP_CLONG(dcmp, olddata.mv_size)) - dcmp = mdb_cmp_clong; - rc = dcmp(data, &olddata); - if (rc) { - if (op == MDB_GET_BOTH || rc > 0) - return MDB_NOTFOUND; - rc = 0; - } - *data = olddata; - - } else { - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - - /* The key already matches in all other cases */ - if (op == MDB_SET_RANGE || op == MDB_SET_KEY) - MDB_GET_KEY(leaf, key); - DPRINTF(("==> cursor placed on key [%s]", DKEY(key))); - - return rc; -} - -/** Move the cursor to the first item in the database. */ -static int -mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data) -{ - int rc; - MDB_node *leaf; - - if (mc->mc_xcursor) { - MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0); - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - } - - if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { - rc = mdb_page_search(mc, NULL, MDB_PS_FIRST); - if (rc != MDB_SUCCESS) - return rc; - } - mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); - - leaf = NODEPTR(mc->mc_pg[mc->mc_top], 0); - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - mc->mc_ki[mc->mc_top] = 0; - - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], 0, key->mv_size); - return MDB_SUCCESS; - } - - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc) - return rc; - } else { - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Move the cursor to the last item in the database. 
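- * - * [Editor's sketch, not part of the original source: a reverse scan pairs this with mdb_cursor_prev() through the public API, e.g. MDB_val k, v; int rc; for (rc = mdb_cursor_get(cur, &k, &v, MDB_LAST); rc == 0; rc = mdb_cursor_get(cur, &k, &v, MDB_PREV)) consume(&k, &v); with 'cur' and 'consume' hypothetical, as in the earlier sketch.]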
*/ -static int -mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data) -{ - int rc; - MDB_node *leaf; - - if (mc->mc_xcursor) { - MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0); - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - } - - if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { - rc = mdb_page_search(mc, NULL, MDB_PS_LAST); - if (rc != MDB_SUCCESS) - return rc; - } - mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); - - mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]) - 1; - mc->mc_flags |= C_INITIALIZED|C_EOF; - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc) - return rc; - } else { - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -int -mdb_cursor_get(MDB_cursor *mc, MDB_val *key, MDB_val *data, - MDB_cursor_op op) -{ - int rc; - int exact = 0; - int (*mfunc)(MDB_cursor *mc, MDB_val *key, MDB_val *data); - - if (mc == NULL) - return EINVAL; - - if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - switch (op) { - case MDB_GET_CURRENT: - if (!(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - } else { - MDB_page *mp = mc->mc_pg[mc->mc_top]; - int nkeys = NUMKEYS(mp); - if (!nkeys || mc->mc_ki[mc->mc_top] >= nkeys) { - mc->mc_ki[mc->mc_top] = nkeys; - rc = MDB_NOTFOUND; - break; - } - rc = MDB_SUCCESS; - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - } else { - MDB_node *leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - MDB_GET_KEY(leaf, key); - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - rc = mdb_cursor_get(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_GET_CURRENT); - } else { - rc = mdb_node_read(mc, leaf, data); - } - } - } - } - break; - case MDB_GET_BOTH: - case MDB_GET_BOTH_RANGE: - if (data == NULL) { - rc = EINVAL; - break; - } - if (mc->mc_xcursor == NULL) { - rc = MDB_INCOMPATIBLE; - break; - } - /* FALLTHRU */ - case MDB_SET: - case MDB_SET_KEY: - case MDB_SET_RANGE: - if (key == NULL) { - rc = EINVAL; - } else { - rc = mdb_cursor_set(mc, key, data, op, - op == MDB_SET_RANGE ? 
NULL : &exact); - } - break; - case MDB_GET_MULTIPLE: - if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - rc = MDB_INCOMPATIBLE; - break; - } - rc = MDB_SUCCESS; - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) || - (mc->mc_xcursor->mx_cursor.mc_flags & C_EOF)) - break; - goto fetchm; - case MDB_NEXT_MULTIPLE: - if (data == NULL) { - rc = EINVAL; - break; - } - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - rc = MDB_INCOMPATIBLE; - break; - } - rc = mdb_cursor_next(mc, key, data, MDB_NEXT_DUP); - if (rc == MDB_SUCCESS) { - if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - MDB_cursor *mx; -fetchm: - mx = &mc->mc_xcursor->mx_cursor; - data->mv_size = NUMKEYS(mx->mc_pg[mx->mc_top]) * - mx->mc_db->md_pad; - data->mv_data = METADATA(mx->mc_pg[mx->mc_top]); - mx->mc_ki[mx->mc_top] = NUMKEYS(mx->mc_pg[mx->mc_top])-1; - } else { - rc = MDB_NOTFOUND; - } - } - break; - case MDB_PREV_MULTIPLE: - if (data == NULL) { - rc = EINVAL; - break; - } - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - rc = MDB_INCOMPATIBLE; - break; - } - if (!(mc->mc_flags & C_INITIALIZED)) - rc = mdb_cursor_last(mc, key, data); - else - rc = MDB_SUCCESS; - if (rc == MDB_SUCCESS) { - MDB_cursor *mx = &mc->mc_xcursor->mx_cursor; - if (mx->mc_flags & C_INITIALIZED) { - rc = mdb_cursor_sibling(mx, 0); - if (rc == MDB_SUCCESS) - goto fetchm; - } else { - rc = MDB_NOTFOUND; - } - } - break; - case MDB_NEXT: - case MDB_NEXT_DUP: - case MDB_NEXT_NODUP: - rc = mdb_cursor_next(mc, key, data, op); - break; - case MDB_PREV: - case MDB_PREV_DUP: - case MDB_PREV_NODUP: - rc = mdb_cursor_prev(mc, key, data, op); - break; - case MDB_FIRST: - rc = mdb_cursor_first(mc, key, data); - break; - case MDB_FIRST_DUP: - mfunc = mdb_cursor_first; - mmove: - if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - if (mc->mc_xcursor == NULL) { - rc = MDB_INCOMPATIBLE; - break; - } - if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) { - mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]); - rc = MDB_NOTFOUND; - break; - } - { - MDB_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - MDB_GET_KEY(leaf, key); - rc = mdb_node_read(mc, leaf, data); - break; - } - } - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - rc = mfunc(&mc->mc_xcursor->mx_cursor, data, NULL); - break; - case MDB_LAST: - rc = mdb_cursor_last(mc, key, data); - break; - case MDB_LAST_DUP: - mfunc = mdb_cursor_last; - goto mmove; - default: - DPRINTF(("unhandled/unimplemented cursor operation %u", op)); - rc = EINVAL; - break; - } - - if (mc->mc_flags & C_DEL) - mc->mc_flags ^= C_DEL; - - return rc; -} - -/** Touch all the pages in the cursor stack. Set mc_top. - * Makes sure all the pages are writable, before attempting a write operation. - * @param[in] mc The cursor to operate on. 
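- * - * [Editor's note, illustrative: the loop below walks mc_pg[0..mc_snum-1] from the root down, calling mdb_page_touch() at each level, so that every page on the cursor's path is replaced by a dirty, writable copy before the write proceeds.]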
- */ -static int -mdb_cursor_touch(MDB_cursor *mc) -{ - int rc = MDB_SUCCESS; - - if (mc->mc_dbi >= CORE_DBS && !(*mc->mc_dbflag & (DB_DIRTY|DB_DUPDATA))) { - /* Touch DB record of named DB */ - MDB_cursor mc2; - MDB_xcursor mcx; - if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) - return MDB_BAD_DBI; - mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, &mcx); - rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, MDB_PS_MODIFY); - if (rc) - return rc; - *mc->mc_dbflag |= DB_DIRTY; - } - mc->mc_top = 0; - if (mc->mc_snum) { - do { - rc = mdb_page_touch(mc); - } while (!rc && ++(mc->mc_top) < mc->mc_snum); - mc->mc_top = mc->mc_snum-1; - } - return rc; -} - -/** Do not spill pages to disk if txn is getting full, may fail instead */ -#define MDB_NOSPILL 0x8000 - -int -mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data, - unsigned int flags) -{ - MDB_env *env; - MDB_node *leaf = NULL; - MDB_page *fp, *mp, *sub_root = NULL; - uint16_t fp_flags; - MDB_val xdata, *rdata, dkey, olddata; - MDB_db dummy; - int do_sub = 0, insert_key, insert_data; - unsigned int mcount = 0, dcount = 0, nospill; - size_t nsize; - int rc, rc2; - unsigned int nflags; - DKBUF; - - if (mc == NULL || key == NULL) - return EINVAL; - - env = mc->mc_txn->mt_env; - - /* Check this first so counter will always be zero on any - * early failures. - */ - if (flags & MDB_MULTIPLE) { - dcount = data[1].mv_size; - data[1].mv_size = 0; - if (!F_ISSET(mc->mc_db->md_flags, MDB_DUPFIXED)) - return MDB_INCOMPATIBLE; - } - - nospill = flags & MDB_NOSPILL; - flags &= ~MDB_NOSPILL; - - if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) - return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; - - if (key->mv_size-1 >= ENV_MAXKEY(env)) - return MDB_BAD_VALSIZE; - -#if SIZE_MAX > MAXDATASIZE - if (data->mv_size > ((mc->mc_db->md_flags & MDB_DUPSORT) ? ENV_MAXKEY(env) : MAXDATASIZE)) - return MDB_BAD_VALSIZE; -#else - if ((mc->mc_db->md_flags & MDB_DUPSORT) && data->mv_size > ENV_MAXKEY(env)) - return MDB_BAD_VALSIZE; -#endif - - DPRINTF(("==> put db %d key [%s], size %"Z"u, data size %"Z"u", - DDBI(mc), DKEY(key), key ? 
key->mv_size : 0, data->mv_size)); - - dkey.mv_size = 0; - - if (flags == MDB_CURRENT) { - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - rc = MDB_SUCCESS; - } else if (mc->mc_db->md_root == P_INVALID) { - /* new database, cursor has nothing to point to */ - mc->mc_snum = 0; - mc->mc_top = 0; - mc->mc_flags &= ~C_INITIALIZED; - rc = MDB_NO_ROOT; - } else { - int exact = 0; - MDB_val d2; - if (flags & MDB_APPEND) { - MDB_val k2; - rc = mdb_cursor_last(mc, &k2, &d2); - if (rc == 0) { - rc = mc->mc_dbx->md_cmp(key, &k2); - if (rc > 0) { - rc = MDB_NOTFOUND; - mc->mc_ki[mc->mc_top]++; - } else { - /* new key is <= last key */ - rc = MDB_KEYEXIST; - } - } - } else { - rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact); - } - if ((flags & MDB_NOOVERWRITE) && rc == 0) { - DPRINTF(("duplicate key [%s]", DKEY(key))); - *data = d2; - return MDB_KEYEXIST; - } - if (rc && rc != MDB_NOTFOUND) - return rc; - } - - if (mc->mc_flags & C_DEL) - mc->mc_flags ^= C_DEL; - - /* Cursor is positioned, check for room in the dirty list */ - if (!nospill) { - if (flags & MDB_MULTIPLE) { - rdata = &xdata; - xdata.mv_size = data->mv_size * dcount; - } else { - rdata = data; - } - if ((rc2 = mdb_page_spill(mc, key, rdata))) - return rc2; - } - - if (rc == MDB_NO_ROOT) { - MDB_page *np; - /* new database, write a root leaf page */ - DPUTS("allocating new root leaf page"); - if ((rc2 = mdb_page_new(mc, P_LEAF, 1, &np))) { - return rc2; - } - mdb_cursor_push(mc, np); - mc->mc_db->md_root = np->mp_pgno; - mc->mc_db->md_depth++; - *mc->mc_dbflag |= DB_DIRTY; - if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED)) - == MDB_DUPFIXED) - np->mp_flags |= P_LEAF2; - mc->mc_flags |= C_INITIALIZED; - } else { - /* make sure all cursor pages are writable */ - rc2 = mdb_cursor_touch(mc); - if (rc2) - return rc2; - } - - insert_key = insert_data = rc; - if (insert_key) { - /* The key does not exist */ - DPRINTF(("inserting key at index %i", mc->mc_ki[mc->mc_top])); - if ((mc->mc_db->md_flags & MDB_DUPSORT) && - LEAFSIZE(key, data) > env->me_nodemax) - { - /* Too big for a node, insert in sub-DB. Set up an empty - * "old sub-page" for prep_subDB to expand to a full page. - */ - fp_flags = P_LEAF|P_DIRTY; - fp = env->me_pbuf; - fp->mp_pad = data->mv_size; /* used if MDB_DUPFIXED */ - fp->mp_lower = fp->mp_upper = (PAGEHDRSZ-PAGEBASE); - olddata.mv_size = PAGEHDRSZ; - goto prep_subDB; - } - } else { - /* there's only a key anyway, so this is a no-op */ - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - char *ptr; - unsigned int ksize = mc->mc_db->md_pad; - if (key->mv_size != ksize) - return MDB_BAD_VALSIZE; - ptr = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], ksize); - memcpy(ptr, key->mv_data, ksize); -fix_parent: - /* if overwriting slot 0 of leaf, need to - * update branch key if there is a parent page - */ - if (mc->mc_top && !mc->mc_ki[mc->mc_top]) { - unsigned short dtop = 1; - mc->mc_top--; - /* slot 0 is always an empty key, find real slot */ - while (mc->mc_top && !mc->mc_ki[mc->mc_top]) { - mc->mc_top--; - dtop++; - } - if (mc->mc_ki[mc->mc_top]) - rc2 = mdb_update_key(mc, key); - else - rc2 = MDB_SUCCESS; - mc->mc_top += dtop; - if (rc2) - return rc2; - } - return MDB_SUCCESS; - } - -more: - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - olddata.mv_size = NODEDSZ(leaf); - olddata.mv_data = NODEDATA(leaf); - - /* DB has dups? */ - if (F_ISSET(mc->mc_db->md_flags, MDB_DUPSORT)) { - /* Prepare (sub-)page/sub-DB to accept the new item, - * if needed. fp: old sub-page or a header faking - * it. 
mp: new (sub-)page. offset: growth in page - * size. xdata: node data with new page or DB. - */ - unsigned i, offset = 0; - mp = fp = xdata.mv_data = env->me_pbuf; - mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno; - - /* Was a single item before, must convert now */ - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - MDB_cmp_func *dcmp; - /* Just overwrite the current item */ - if (flags == MDB_CURRENT) - goto current; - dcmp = mc->mc_dbx->md_dcmp; - if (NEED_CMP_CLONG(dcmp, olddata.mv_size)) - dcmp = mdb_cmp_clong; - /* does data match? */ - if (!dcmp(data, &olddata)) { - if (flags & (MDB_NODUPDATA|MDB_APPENDDUP)) - return MDB_KEYEXIST; - /* overwrite it */ - goto current; - } - - /* Back up original data item */ - dkey.mv_size = olddata.mv_size; - dkey.mv_data = memcpy(fp+1, olddata.mv_data, olddata.mv_size); - - /* Make sub-page header for the dup items, with dummy body */ - fp->mp_flags = P_LEAF|P_DIRTY|P_SUBP; - fp->mp_lower = (PAGEHDRSZ-PAGEBASE); - xdata.mv_size = PAGEHDRSZ + dkey.mv_size + data->mv_size; - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - fp->mp_flags |= P_LEAF2; - fp->mp_pad = data->mv_size; - xdata.mv_size += 2 * data->mv_size; /* leave space for 2 more */ - } else { - xdata.mv_size += 2 * (sizeof(indx_t) + NODESIZE) + - (dkey.mv_size & 1) + (data->mv_size & 1); - } - fp->mp_upper = xdata.mv_size - PAGEBASE; - olddata.mv_size = xdata.mv_size; /* pretend olddata is fp */ - } else if (leaf->mn_flags & F_SUBDATA) { - /* Data is on sub-DB, just store it */ - flags |= F_DUPDATA|F_SUBDATA; - goto put_sub; - } else { - /* Data is on sub-page */ - fp = olddata.mv_data; - switch (flags) { - default: - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - offset = EVEN(NODESIZE + sizeof(indx_t) + - data->mv_size); - break; - } - offset = fp->mp_pad; - if (SIZELEFT(fp) < offset) { - offset *= 4; /* space for 4 more */ - break; - } - /* FALLTHRU: Big enough MDB_DUPFIXED sub-page */ - __attribute__ ((fallthrough)); - case MDB_CURRENT: - fp->mp_flags |= P_DIRTY; - COPY_PGNO(fp->mp_pgno, mp->mp_pgno); - mc->mc_xcursor->mx_cursor.mc_pg[0] = fp; - flags |= F_DUPDATA; - goto put_sub; - } - xdata.mv_size = olddata.mv_size + offset; - } - - fp_flags = fp->mp_flags; - if (NODESIZE + NODEKSZ(leaf) + xdata.mv_size > env->me_nodemax) { - /* Too big for a sub-page, convert to sub-DB */ - fp_flags &= ~P_SUBP; -prep_subDB: - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - fp_flags |= P_LEAF2; - dummy.md_pad = fp->mp_pad; - dummy.md_flags = MDB_DUPFIXED; - if (mc->mc_db->md_flags & MDB_INTEGERDUP) - dummy.md_flags |= MDB_INTEGERKEY; - } else { - dummy.md_pad = 0; - dummy.md_flags = 0; - } - dummy.md_depth = 1; - dummy.md_branch_pages = 0; - dummy.md_leaf_pages = 1; - dummy.md_overflow_pages = 0; - dummy.md_entries = NUMKEYS(fp); - xdata.mv_size = sizeof(MDB_db); - xdata.mv_data = &dummy; - if ((rc = mdb_page_alloc(mc, 1, &mp))) - return rc; - offset = env->me_psize - olddata.mv_size; - flags |= F_DUPDATA|F_SUBDATA; - dummy.md_root = mp->mp_pgno; - sub_root = mp; - } - if (mp != fp) { - mp->mp_flags = fp_flags | P_DIRTY; - mp->mp_pad = fp->mp_pad; - mp->mp_lower = fp->mp_lower; - mp->mp_upper = fp->mp_upper + offset; - if (fp_flags & P_LEAF2) { - memcpy(METADATA(mp), METADATA(fp), NUMKEYS(fp) * fp->mp_pad); - } else { - memcpy((char *)mp + mp->mp_upper + PAGEBASE, (char *)fp + fp->mp_upper + PAGEBASE, - olddata.mv_size - fp->mp_upper - PAGEBASE); - memcpy((char *)(&mp->mp_ptrs), (char *)(&fp->mp_ptrs), NUMKEYS(fp) * sizeof(mp->mp_ptrs[0])); - for (i=0; i<NUMKEYS(fp); i++) - mp->mp_ptrs[i] += offset; - } - } - - rdata = &xdata; - flags |= 
F_DUPDATA; - do_sub = 1; - if (!insert_key) - mdb_node_del(mc, 0); - goto new_sub; - } -current: - /* LMDB passes F_SUBDATA in 'flags' to write a DB record */ - if ((leaf->mn_flags ^ flags) & F_SUBDATA) - return MDB_INCOMPATIBLE; - /* overflow page overwrites need special handling */ - if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { - MDB_page *omp; - pgno_t pg; - int level, ovpages, dpages = OVPAGES(data->mv_size, env->me_psize); - - memcpy(&pg, olddata.mv_data, sizeof(pg)); - if ((rc2 = mdb_page_get(mc, pg, &omp, &level)) != 0) - return rc2; - ovpages = omp->mp_pages; - - /* Is the ov page large enough? */ - if (ovpages >= dpages) { - if (!(omp->mp_flags & P_DIRTY) && - (level || (env->me_flags & MDB_WRITEMAP))) - { - rc = mdb_page_unspill(mc->mc_txn, omp, &omp); - if (rc) - return rc; - level = 0; /* dirty in this txn or clean */ - } - /* Is it dirty? */ - if (omp->mp_flags & P_DIRTY) { - /* yes, overwrite it. Note in this case we don't - * bother to try shrinking the page if the new data - * is smaller than the overflow threshold. - */ - if (level > 1) { - /* It is writable only in a parent txn */ - size_t sz = (size_t) env->me_psize * ovpages, off; - MDB_page *np = mdb_page_malloc(mc->mc_txn, ovpages); - MDB_ID2 id2; - if (!np) - return ENOMEM; - id2.mid = pg; - id2.mptr = np; - /* Note - this page is already counted in parent's dirty_room */ - rc2 = mdb_mid2l_insert(mc->mc_txn->mt_u.dirty_list, &id2); - mdb_cassert(mc, rc2 == 0); - /* Currently we make the page look as with put() in the - * parent txn, in case the user peeks at MDB_RESERVEd - * or unused parts. Some users treat ovpages specially. - */ - if (!(flags & MDB_RESERVE)) { - /* Skip the part where LMDB will put *data. - * Copy end of page, adjusting alignment so - * compiler may copy words instead of bytes. - */ - off = (PAGEHDRSZ + data->mv_size) & -sizeof(size_t); - memcpy((size_t *)((char *)np + off), - (size_t *)((char *)omp + off), sz - off); - sz = PAGEHDRSZ; - } - memcpy(np, omp, sz); /* Copy beginning of page */ - omp = np; - } - SETDSZ(leaf, data->mv_size); - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = METADATA(omp); - else - memcpy(METADATA(omp), data->mv_data, data->mv_size); - return MDB_SUCCESS; - } - } - if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS) - return rc2; - } else if (data->mv_size == olddata.mv_size) { - /* same size, just replace it. Note that we could - * also reuse this node if the new data is smaller, - * but instead we opt to shrink the node in that case. - */ - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = olddata.mv_data; - else if (!(mc->mc_flags & C_SUB)) - memcpy(olddata.mv_data, data->mv_data, data->mv_size); - else { - memcpy(NODEKEY(leaf), key->mv_data, key->mv_size); - goto fix_parent; - } - return MDB_SUCCESS; - } - mdb_node_del(mc, 0); - } - - rdata = data; - -new_sub: - nflags = flags & NODE_ADD_FLAGS; - nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? key->mv_size : mdb_leaf_size(env, key, rdata); - if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) { - if (( flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA ) - nflags &= ~MDB_APPEND; /* sub-page may need room to grow */ - if (!insert_key) - nflags |= MDB_SPLIT_REPLACE; - rc = mdb_page_split(mc, key, rdata, P_INVALID, nflags); - } else { - /* There is room already in this leaf page. 
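- * [Editor's note, illustrative: the branch above went through mdb_page_split() because SIZELEFT() reported too little room; on this path the new node fits and mdb_node_add() below can insert it directly.]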
*/ - rc = mdb_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags); - if (rc == 0) { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - unsigned i = mc->mc_top; - MDB_page *mp = mc->mc_pg[i]; - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == mc || m3->mc_snum < mc->mc_snum || m3->mc_pg[i] != mp) continue; - if (m3->mc_ki[i] >= mc->mc_ki[i] && insert_key) { - m3->mc_ki[i]++; - } - XCURSOR_REFRESH(m3, i, mp); - } - } - } - - if (rc == MDB_SUCCESS) { - /* Now store the actual data in the child DB. Note that we're - * storing the user data in the keys field, so there are strict - * size limits on dupdata. The actual data fields of the child - * DB are all zero size. - */ - if (do_sub) { - int xflags, new_dupdata; - mdb_size_t ecount; -put_sub: - xdata.mv_size = 0; - xdata.mv_data = ""; - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (flags & MDB_CURRENT) { - xflags = MDB_CURRENT|MDB_NOSPILL; - } else { - mdb_xcursor_init1(mc, leaf); - xflags = (flags & MDB_NODUPDATA) ? - MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL; - } - if (sub_root) - mc->mc_xcursor->mx_cursor.mc_pg[0] = sub_root; - new_dupdata = (int)dkey.mv_size; - /* converted, write the original data first */ - if (dkey.mv_size) { - rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags); - if (rc) - goto bad_sub; - /* we've done our job */ - dkey.mv_size = 0; - } - if (!(leaf->mn_flags & F_SUBDATA) || sub_root) { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2; - MDB_xcursor *mx = mc->mc_xcursor; - unsigned i = mc->mc_top; - MDB_page *mp = mc->mc_pg[i]; - - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; - if (!(m2->mc_flags & C_INITIALIZED)) continue; - if (m2->mc_pg[i] == mp) { - if (m2->mc_ki[i] == mc->mc_ki[i]) { - mdb_xcursor_init2(m2, mx, new_dupdata); - } else if (!insert_key) { - XCURSOR_REFRESH(m2, i, mp); - } - } - } - } - ecount = mc->mc_xcursor->mx_db.md_entries; - if (flags & MDB_APPENDDUP) - xflags |= MDB_APPEND; - rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags); - if (flags & F_SUBDATA) { - void *db = NODEDATA(leaf); - memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db)); - } - insert_data = mc->mc_xcursor->mx_db.md_entries - ecount; - } - /* Increment count unless we just replaced an existing item. */ - if (insert_data) - mc->mc_db->md_entries++; - if (insert_key) { - /* Invalidate txn if we created an empty sub-DB */ - if (rc) - goto bad_sub; - /* If we succeeded and the key didn't exist before, - * make sure the cursor is marked valid. - */ - mc->mc_flags |= C_INITIALIZED; - } - if (flags & MDB_MULTIPLE) { - if (!rc) { - mcount++; - /* let caller know how many succeeded, if any */ - data[1].mv_size = mcount; - if (mcount < dcount) { - data[0].mv_data = (char *)data[0].mv_data + data[0].mv_size; - insert_key = insert_data = 0; - goto more; - } - } - } - return rc; -bad_sub: - if (rc == MDB_KEYEXIST) /* should not happen, we deleted that item */ - rc = MDB_PROBLEM; - } - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_cursor_del(MDB_cursor *mc, unsigned int flags) -{ - MDB_node *leaf; - MDB_page *mp; - int rc; - - if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) - return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? 
EACCES : MDB_BAD_TXN; - - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - - if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) - return MDB_NOTFOUND; - - if (!(flags & MDB_NOSPILL) && (rc = mdb_page_spill(mc, NULL, NULL))) - return rc; - - rc = mdb_cursor_touch(mc); - if (rc) - return rc; - - mp = mc->mc_pg[mc->mc_top]; - if (IS_LEAF2(mp)) - goto del_key; - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (flags & MDB_NODUPDATA) { - /* mdb_cursor_del0() will subtract the final entry */ - mc->mc_db->md_entries -= mc->mc_xcursor->mx_db.md_entries - 1; - mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; - } else { - if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) { - mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - } - rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, MDB_NOSPILL); - if (rc) - return rc; - /* If sub-DB still has entries, we're done */ - if (mc->mc_xcursor->mx_db.md_entries) { - if (leaf->mn_flags & F_SUBDATA) { - /* update subDB info */ - void *db = NODEDATA(leaf); - memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db)); - } else { - MDB_cursor *m2; - /* shrink fake page */ - mdb_node_shrink(mp, mc->mc_ki[mc->mc_top]); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - /* fix other sub-DB cursors pointed at fake pages on this page */ - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; - if (!(m2->mc_flags & C_INITIALIZED)) continue; - if (m2->mc_pg[mc->mc_top] == mp) { - XCURSOR_REFRESH(m2, mc->mc_top, mp); - } - } - } - mc->mc_db->md_entries--; - return rc; - } else { - mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; - } - /* otherwise fall thru and delete the sub-DB */ - } - - if (leaf->mn_flags & F_SUBDATA) { - /* add all the child DB's pages to the free list */ - rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0); - if (rc) - goto fail; - } - } - /* LMDB passes F_SUBDATA in 'flags' to delete a DB record */ - else if ((leaf->mn_flags ^ flags) & F_SUBDATA) { - rc = MDB_INCOMPATIBLE; - goto fail; - } - - /* add overflow pages to free list */ - if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { - MDB_page *omp; - pgno_t pg; - - memcpy(&pg, NODEDATA(leaf), sizeof(pg)); - if ((rc = mdb_page_get(mc, pg, &omp, NULL)) || - (rc = mdb_ovpage_free(mc, omp))) - goto fail; - } - -del_key: - return mdb_cursor_del0(mc); - -fail: - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -/** Allocate and initialize new pages for a database. - * Set #MDB_TXN_ERROR on failure. - * @param[in] mc a cursor on the database being added to. - * @param[in] flags flags defining what type of page is being allocated. - * @param[in] num the number of pages to allocate. This is usually 1, - * unless allocating overflow pages for a large record. - * @param[out] mp Address of a page, or NULL on failure. - * @return 0 on success, non-zero on failure. 
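- * - * [Editor's note, illustrative: num is greater than 1 only when allocating overflow pages, where a single value too large for me_nodemax is laid out across num contiguous pages; the accounting below then adds num to md_overflow_pages and records it in mp_pages.]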
- */ -static int -mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp) -{ - MDB_page *np; - int rc; - - if ((rc = mdb_page_alloc(mc, num, &np))) - return rc; - DPRINTF(("allocated new mpage %"Yu", page size %u", - np->mp_pgno, mc->mc_txn->mt_env->me_psize)); - np->mp_flags = flags | P_DIRTY; - np->mp_lower = (PAGEHDRSZ-PAGEBASE); - np->mp_upper = mc->mc_txn->mt_env->me_psize - PAGEBASE; - - if (IS_BRANCH(np)) - mc->mc_db->md_branch_pages++; - else if (IS_LEAF(np)) - mc->mc_db->md_leaf_pages++; - else if (IS_OVERFLOW(np)) { - mc->mc_db->md_overflow_pages += num; - np->mp_pages = num; - } - *mp = np; - - return 0; -} - -/** Calculate the size of a leaf node. - * The size depends on the environment's page size; if a data item - * is too large it will be put onto an overflow page and the node - * size will only include the key and not the data. Sizes are always - * rounded up to an even number of bytes, to guarantee 2-byte alignment - * of the #MDB_node headers. - * @param[in] env The environment handle. - * @param[in] key The key for the node. - * @param[in] data The data for the node. - * @return The number of bytes needed to store the node. - */ -static size_t -mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data) -{ - size_t sz; - - sz = LEAFSIZE(key, data); - if (sz > env->me_nodemax) { - /* put on overflow page */ - sz -= data->mv_size - sizeof(pgno_t); - } - - return EVEN(sz + sizeof(indx_t)); -} - -/** Calculate the size of a branch node. - * The size should depend on the environment's page size but since - * we currently don't support spilling large keys onto overflow - * pages, it's simply the size of the #MDB_node header plus the - * size of the key. Sizes are always rounded up to an even number - * of bytes, to guarantee 2-byte alignment of the #MDB_node headers. - * @param[in] env The environment handle. - * @param[in] key The key for the node. - * @return The number of bytes needed to store the node. - */ -static size_t -mdb_branch_size(MDB_env *env, MDB_val *key) -{ - size_t sz; - - sz = INDXSIZE(key); - if (sz > env->me_nodemax) { - /* put on overflow page */ - /* not implemented */ - /* sz -= key->size - sizeof(pgno_t); */ - } - - return sz + sizeof(indx_t); -} - -/** Add a node to the page pointed to by the cursor. - * Set #MDB_TXN_ERROR on failure. - * @param[in] mc The cursor for this operation. - * @param[in] indx The index on the page where the new node should be added. - * @param[in] key The key for the new node. - * @param[in] data The data for the new node, if any. - * @param[in] pgno The page number, if adding a branch node. - * @param[in] flags Flags for the node. - * @return 0 on success, non-zero on failure. Possible errors are: - *
- *  - ENOMEM - failed to allocate overflow pages for the node.
- *  - MDB_PAGE_FULL - there is insufficient room in the page. This error
- *    should never happen since all callers already calculate the
- *    page's free space before calling this function.
- */ -static int -mdb_node_add(MDB_cursor *mc, indx_t indx, - MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags) -{ - unsigned int i; - size_t node_size = NODESIZE; - ssize_t room; - indx_t ofs; - MDB_node *node; - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_page *ofp = NULL; /* overflow page */ - void *ndata; - DKBUF; - - mdb_cassert(mc, mp->mp_upper >= mp->mp_lower); - - DPRINTF(("add to %s %spage %"Yu" index %i, data size %"Z"u key size %"Z"u [%s]", - IS_LEAF(mp) ? "leaf" : "branch", - IS_SUBP(mp) ? "sub-" : "", - mdb_dbg_pgno(mp), indx, data ? data->mv_size : 0, - key ? key->mv_size : 0, key ? DKEY(key) : "null")); - - if (IS_LEAF2(mp)) { - /* Move higher keys up one slot. */ - int ksize = mc->mc_db->md_pad, dif; - char *ptr = LEAF2KEY(mp, indx, ksize); - dif = NUMKEYS(mp) - indx; - if (dif > 0) - memmove(ptr+ksize, ptr, dif*ksize); - /* insert new key */ - memcpy(ptr, key->mv_data, ksize); - - /* Just using these for counting */ - mp->mp_lower += sizeof(indx_t); - mp->mp_upper -= ksize - sizeof(indx_t); - return MDB_SUCCESS; - } - - room = (ssize_t)SIZELEFT(mp) - (ssize_t)sizeof(indx_t); - if (key != NULL) - node_size += key->mv_size; - if (IS_LEAF(mp)) { - mdb_cassert(mc, key && data); - if (F_ISSET(flags, F_BIGDATA)) { - /* Data already on overflow page. */ - node_size += sizeof(pgno_t); - } else if (node_size + data->mv_size > mc->mc_txn->mt_env->me_nodemax) { - int ovpages = OVPAGES(data->mv_size, mc->mc_txn->mt_env->me_psize); - int rc; - /* Put data on overflow page. */ - DPRINTF(("data size is %"Z"u, node would be %"Z"u, put data on overflow page", - data->mv_size, node_size+data->mv_size)); - node_size = EVEN(node_size + sizeof(pgno_t)); - if ((ssize_t)node_size > room) - goto full; - if ((rc = mdb_page_new(mc, P_OVERFLOW, ovpages, &ofp))) - return rc; - DPRINTF(("allocated overflow page %"Yu, ofp->mp_pgno)); - flags |= F_BIGDATA; - goto update; - } else { - node_size += data->mv_size; - } - } - node_size = EVEN(node_size); - if ((ssize_t)node_size > room) - goto full; - -update: - /* Move higher pointers up one slot. */ - for (i = NUMKEYS(mp); i > indx; i--) - mp->mp_ptrs[i] = mp->mp_ptrs[i - 1]; - - /* Adjust free space offsets. */ - ofs = mp->mp_upper - node_size; - mdb_cassert(mc, ofs >= mp->mp_lower + sizeof(indx_t)); - mp->mp_ptrs[indx] = ofs; - mp->mp_upper = ofs; - mp->mp_lower += sizeof(indx_t); - - /* Write the node data. */ - node = NODEPTR(mp, indx); - node->mn_ksize = (key == NULL) ? 0 : key->mv_size; - node->mn_flags = flags; - if (IS_LEAF(mp)) - SETDSZ(node,data->mv_size); - else - SETPGNO(node,pgno); - - if (key) - memcpy(NODEKEY(node), key->mv_data, key->mv_size); - - if (IS_LEAF(mp)) { - ndata = NODEDATA(node); - if (ofp == NULL) { - if (F_ISSET(flags, F_BIGDATA)) - memcpy(ndata, data->mv_data, sizeof(pgno_t)); - else if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = ndata; - else - memcpy(ndata, data->mv_data, data->mv_size); - } else { - memcpy(ndata, &ofp->mp_pgno, sizeof(pgno_t)); - ndata = METADATA(ofp); - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = ndata; - else - memcpy(ndata, data->mv_data, data->mv_size); - } - } - - return MDB_SUCCESS; - -full: - DPRINTF(("not enough room in page %"Yu", got %u ptrs", - mdb_dbg_pgno(mp), NUMKEYS(mp))); - DPRINTF(("upper-lower = %u - %u = %"Z"d", mp->mp_upper,mp->mp_lower,room)); - DPRINTF(("node size = %"Z"u", node_size)); - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PAGE_FULL; -} - -/** Delete the specified node from a page. - * @param[in] mc Cursor pointing to the node to delete. 
- * @param[in] ksize The size of a node. Only used if the page is - * part of a #MDB_DUPFIXED database. - */ -static void -mdb_node_del(MDB_cursor *mc, int ksize) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - indx_t indx = mc->mc_ki[mc->mc_top]; - unsigned int sz; - indx_t i, j, numkeys, ptr; - MDB_node *node; - char *base; - - DPRINTF(("delete node %u on %s page %"Yu, indx, - IS_LEAF(mp) ? "leaf" : "branch", mdb_dbg_pgno(mp))); - numkeys = NUMKEYS(mp); - mdb_cassert(mc, indx < numkeys); - - if (IS_LEAF2(mp)) { - int x = numkeys - 1 - indx; - base = LEAF2KEY(mp, indx, ksize); - if (x) - memmove(base, base + ksize, x * ksize); - mp->mp_lower -= sizeof(indx_t); - mp->mp_upper += ksize - sizeof(indx_t); - return; - } - - node = NODEPTR(mp, indx); - sz = NODESIZE + node->mn_ksize; - if (IS_LEAF(mp)) { - if (F_ISSET(node->mn_flags, F_BIGDATA)) - sz += sizeof(pgno_t); - else - sz += NODEDSZ(node); - } - sz = EVEN(sz); - - ptr = mp->mp_ptrs[indx]; - for (i = j = 0; i < numkeys; i++) { - if (i != indx) { - mp->mp_ptrs[j] = mp->mp_ptrs[i]; - if (mp->mp_ptrs[i] < ptr) - mp->mp_ptrs[j] += sz; - j++; - } - } - - base = (char *)mp + mp->mp_upper + PAGEBASE; - memmove(base + sz, base, ptr - mp->mp_upper); - - mp->mp_lower -= sizeof(indx_t); - mp->mp_upper += sz; -} - -/** Compact the main page after deleting a node on a subpage. - * @param[in] mp The main page to operate on. - * @param[in] indx The index of the subpage on the main page. - */ -static void -mdb_node_shrink(MDB_page *mp, indx_t indx) -{ - MDB_node *node; - MDB_page *sp, *xp; - char *base; - indx_t delta, nsize, len, ptr; - int i; - - node = NODEPTR(mp, indx); - sp = (MDB_page *)NODEDATA(node); - delta = SIZELEFT(sp); - nsize = NODEDSZ(node) - delta; - - /* Prepare to shift upward, set len = length(subpage part to shift) */ - if (IS_LEAF2(sp)) { - len = nsize; - if (nsize & 1) - return; /* do not make the node uneven-sized */ - } else { - xp = (MDB_page *)((char *)sp + delta); /* destination subpage */ - for (i = NUMKEYS(sp); --i >= 0; ) - xp->mp_ptrs[i] = sp->mp_ptrs[i] - delta; - len = PAGEHDRSZ; - } - sp->mp_upper = sp->mp_lower; - COPY_PGNO(sp->mp_pgno, mp->mp_pgno); - SETDSZ(node, nsize); - - /* Shift upward */ - base = (char *)mp + mp->mp_upper + PAGEBASE; - memmove(base + delta, base, (char *)sp + len - base); - - ptr = mp->mp_ptrs[indx]; - for (i = NUMKEYS(mp); --i >= 0; ) { - if (mp->mp_ptrs[i] <= ptr) - mp->mp_ptrs[i] += delta; - } - mp->mp_upper += delta; -} - -/** Initial setup of a sorted-dups cursor. - * Sorted duplicates are implemented as a sub-database for the given key. - * The duplicate data items are actually keys of the sub-database. - * Operations on the duplicate data items are performed using a sub-cursor - * initialized when the sub-database is first accessed. This function does - * the preliminary setup of the sub-cursor, filling in the fields that - * depend only on the parent DB. - * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized. 
- */ -static void -mdb_xcursor_init0(MDB_cursor *mc) -{ - MDB_xcursor *mx = mc->mc_xcursor; - - mx->mx_cursor.mc_xcursor = NULL; - mx->mx_cursor.mc_txn = mc->mc_txn; - mx->mx_cursor.mc_db = &mx->mx_db; - mx->mx_cursor.mc_dbx = &mx->mx_dbx; - mx->mx_cursor.mc_dbi = mc->mc_dbi; - mx->mx_cursor.mc_dbflag = &mx->mx_dbflag; - mx->mx_cursor.mc_snum = 0; - mx->mx_cursor.mc_top = 0; - MC_SET_OVPG(&mx->mx_cursor, NULL); - mx->mx_cursor.mc_flags = C_SUB | (mc->mc_flags & (C_ORIG_RDONLY|C_WRITEMAP)); - mx->mx_dbx.md_name.mv_size = 0; - mx->mx_dbx.md_name.mv_data = NULL; - mx->mx_dbx.md_cmp = mc->mc_dbx->md_dcmp; - mx->mx_dbx.md_dcmp = NULL; - mx->mx_dbx.md_rel = mc->mc_dbx->md_rel; -} - -/** Final setup of a sorted-dups cursor. - * Sets up the fields that depend on the data from the main cursor. - * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized. - * @param[in] node The data containing the #MDB_db record for the - * sorted-dup database. - */ -static void -mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node) -{ - MDB_xcursor *mx = mc->mc_xcursor; - - mx->mx_cursor.mc_flags &= C_SUB|C_ORIG_RDONLY|C_WRITEMAP; - if (node->mn_flags & F_SUBDATA) { - memcpy(&mx->mx_db, NODEDATA(node), sizeof(MDB_db)); - mx->mx_cursor.mc_pg[0] = 0; - mx->mx_cursor.mc_snum = 0; - mx->mx_cursor.mc_top = 0; - } else { - MDB_page *fp = NODEDATA(node); - mx->mx_db.md_pad = 0; - mx->mx_db.md_flags = 0; - mx->mx_db.md_depth = 1; - mx->mx_db.md_branch_pages = 0; - mx->mx_db.md_leaf_pages = 1; - mx->mx_db.md_overflow_pages = 0; - mx->mx_db.md_entries = NUMKEYS(fp); - COPY_PGNO(mx->mx_db.md_root, fp->mp_pgno); - mx->mx_cursor.mc_snum = 1; - mx->mx_cursor.mc_top = 0; - mx->mx_cursor.mc_flags |= C_INITIALIZED; - mx->mx_cursor.mc_pg[0] = fp; - mx->mx_cursor.mc_ki[0] = 0; - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - mx->mx_db.md_flags = MDB_DUPFIXED; - mx->mx_db.md_pad = fp->mp_pad; - if (mc->mc_db->md_flags & MDB_INTEGERDUP) - mx->mx_db.md_flags |= MDB_INTEGERKEY; - } - } - DPRINTF(("Sub-db -%u root page %"Yu, mx->mx_cursor.mc_dbi, - mx->mx_db.md_root)); - mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DUPDATA; - if (NEED_CMP_CLONG(mx->mx_dbx.md_cmp, mx->mx_db.md_pad)) - mx->mx_dbx.md_cmp = mdb_cmp_clong; -} - - -/** Fixup a sorted-dups cursor due to underlying update. - * Sets up some fields that depend on the data from the main cursor. - * Almost the same as init1, but skips initialization steps if the - * xcursor had already been used. - * @param[in] mc The main cursor whose sorted-dups cursor is to be fixed up. - * @param[in] src_mx The xcursor of an up-to-date cursor. - * @param[in] new_dupdata True if converting from a non-#F_DUPDATA item. - */ -static void -mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int new_dupdata) -{ - MDB_xcursor *mx = mc->mc_xcursor; - - if (new_dupdata) { - mx->mx_cursor.mc_snum = 1; - mx->mx_cursor.mc_top = 0; - mx->mx_cursor.mc_flags |= C_INITIALIZED; - mx->mx_cursor.mc_ki[0] = 0; - mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DUPDATA; -#if UINT_MAX < MDB_SIZE_MAX /* matches mdb_xcursor_init1:NEED_CMP_CLONG() */ - mx->mx_dbx.md_cmp = src_mx->mx_dbx.md_cmp; -#endif - } else if (!(mx->mx_cursor.mc_flags & C_INITIALIZED)) { - return; - } - mx->mx_db = src_mx->mx_db; - mx->mx_cursor.mc_pg[0] = src_mx->mx_cursor.mc_pg[0]; - DPRINTF(("Sub-db -%u root page %"Yu, mx->mx_cursor.mc_dbi, - mx->mx_db.md_root)); -} - -/** Initialize a cursor for a given transaction and database. 
*/ -static void -mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx) -{ - mc->mc_next = NULL; - mc->mc_backup = NULL; - mc->mc_dbi = dbi; - mc->mc_txn = txn; - mc->mc_db = &txn->mt_dbs[dbi]; - mc->mc_dbx = &txn->mt_dbxs[dbi]; - mc->mc_dbflag = &txn->mt_dbflags[dbi]; - mc->mc_snum = 0; - mc->mc_top = 0; - mc->mc_pg[0] = 0; - mc->mc_ki[0] = 0; - MC_SET_OVPG(mc, NULL); - mc->mc_flags = txn->mt_flags & (C_ORIG_RDONLY|C_WRITEMAP); - if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) { - mdb_tassert(txn, mx != NULL); - mc->mc_xcursor = mx; - mdb_xcursor_init0(mc); - } else { - mc->mc_xcursor = NULL; - } - if (*mc->mc_dbflag & DB_STALE) { - mdb_page_search(mc, NULL, MDB_PS_ROOTONLY); - } -} - -int -mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **ret) -{ - MDB_cursor *mc; - size_t size = sizeof(MDB_cursor); - - if (!ret || !TXN_DBI_EXIST(txn, dbi, DB_VALID)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - if (dbi == FREE_DBI && !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) - return EINVAL; - - if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) - size += sizeof(MDB_xcursor); - - if ((mc = malloc(size)) != NULL) { - mdb_cursor_init(mc, txn, dbi, (MDB_xcursor *)(mc + 1)); - if (txn->mt_cursors) { - mc->mc_next = txn->mt_cursors[dbi]; - txn->mt_cursors[dbi] = mc; - mc->mc_flags |= C_UNTRACK; - } - } else { - return ENOMEM; - } - - *ret = mc; - - return MDB_SUCCESS; -} - -int -mdb_cursor_renew(MDB_txn *txn, MDB_cursor *mc) -{ - if (!mc || !TXN_DBI_EXIST(txn, mc->mc_dbi, DB_VALID)) - return EINVAL; - - if ((mc->mc_flags & C_UNTRACK) || txn->mt_cursors) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - mdb_cursor_init(mc, txn, mc->mc_dbi, mc->mc_xcursor); - return MDB_SUCCESS; -} - -/* Return the count of duplicate data items for the current key */ -int -mdb_cursor_count(MDB_cursor *mc, mdb_size_t *countp) -{ - MDB_node *leaf; - - if (mc == NULL || countp == NULL) - return EINVAL; - - if (mc->mc_xcursor == NULL) - return MDB_INCOMPATIBLE; - - if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - - if (!mc->mc_snum) - return MDB_NOTFOUND; - - if (mc->mc_flags & C_EOF) { - if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) - return MDB_NOTFOUND; - mc->mc_flags ^= C_EOF; - } - - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - *countp = 1; - } else { - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) - return EINVAL; - - *countp = mc->mc_xcursor->mx_db.md_entries; - } - return MDB_SUCCESS; -} - -void -mdb_cursor_close(MDB_cursor *mc) -{ - if (mc) { - MDB_CURSOR_UNREF(mc, 0); - } - if (mc && !mc->mc_backup) { - /* Remove from txn, if tracked. - * A read-only txn (!C_UNTRACK) may have been freed already, - * so do not peek inside it. Only write txns track cursors. - */ - if ((mc->mc_flags & C_UNTRACK) && mc->mc_txn->mt_cursors) { - MDB_cursor **prev = &mc->mc_txn->mt_cursors[mc->mc_dbi]; - while (*prev && *prev != mc) prev = &(*prev)->mc_next; - if (*prev == mc) - *prev = mc->mc_next; - } - free(mc); - } -} - -MDB_txn * -mdb_cursor_txn(MDB_cursor *mc) -{ - if (!mc) return NULL; - return mc->mc_txn; -} - -MDB_dbi -mdb_cursor_dbi(MDB_cursor *mc) -{ - return mc->mc_dbi; -} - -/** Replace the key for a branch node with a new key. - * Set #MDB_TXN_ERROR on failure. - * @param[in] mc Cursor pointing to the node to operate on. - * @param[in] key The new key to use. 
- * @return 0 on success, non-zero on failure. - */ -static int -mdb_update_key(MDB_cursor *mc, MDB_val *key) -{ - MDB_page *mp; - MDB_node *node; - char *base; - size_t len; - int delta, ksize, oksize; - indx_t ptr, i, numkeys, indx; - DKBUF; - - indx = mc->mc_ki[mc->mc_top]; - mp = mc->mc_pg[mc->mc_top]; - node = NODEPTR(mp, indx); - ptr = mp->mp_ptrs[indx]; -#if MDB_DEBUG - { - MDB_val k2; - char kbuf2[DKBUF_MAXKEYSIZE*2+1]; - k2.mv_data = NODEKEY(node); - k2.mv_size = node->mn_ksize; - DPRINTF(("update key %u (ofs %u) [%s] to [%s] on page %"Yu, - indx, ptr, - mdb_dkey(&k2, kbuf2), - DKEY(key), - mp->mp_pgno)); - } -#endif - - /* Sizes must be 2-byte aligned. */ - ksize = EVEN(key->mv_size); - oksize = EVEN(node->mn_ksize); - delta = ksize - oksize; - - /* Shift node contents if EVEN(key length) changed. */ - if (delta) { - if (delta > 0 && SIZELEFT(mp) < delta) { - pgno_t pgno; - /* not enough space left, do a delete and split */ - DPRINTF(("Not enough room, delta = %d, splitting...", delta)); - pgno = NODEPGNO(node); - mdb_node_del(mc, 0); - return mdb_page_split(mc, key, NULL, pgno, MDB_SPLIT_REPLACE); - } - - numkeys = NUMKEYS(mp); - for (i = 0; i < numkeys; i++) { - if (mp->mp_ptrs[i] <= ptr) - mp->mp_ptrs[i] -= delta; - } - - base = (char *)mp + mp->mp_upper + PAGEBASE; - len = ptr - mp->mp_upper + NODESIZE; - memmove(base - delta, base, len); - mp->mp_upper -= delta; - - node = NODEPTR(mp, indx); - } - - /* But even if no shift was needed, update ksize */ - if (node->mn_ksize != key->mv_size) - node->mn_ksize = key->mv_size; - - if (key->mv_size) - memcpy(NODEKEY(node), key->mv_data, key->mv_size); - - return MDB_SUCCESS; -} - -static void -mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst); - -/** Perform \b act while tracking temporary cursor \b mn */ -#define WITH_CURSOR_TRACKING(mn, act) do { \ - MDB_cursor dummy, *tracked, **tp = &(mn).mc_txn->mt_cursors[mn.mc_dbi]; \ - if ((mn).mc_flags & C_SUB) { \ - dummy.mc_flags = C_INITIALIZED; \ - dummy.mc_xcursor = (MDB_xcursor *)&(mn); \ - tracked = &dummy; \ - } else { \ - tracked = &(mn); \ - } \ - tracked->mc_next = *tp; \ - *tp = tracked; \ - { act; } \ - *tp = tracked->mc_next; \ -} while (0) - -/** Move a node from csrc to cdst. - */ -static int -mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft) -{ - MDB_node *srcnode; - MDB_val key, data; - pgno_t srcpg; - MDB_cursor mn; - int rc; - unsigned short flags; - - DKBUF; - - /* Mark src and dst as dirty. 
*/ - if ((rc = mdb_page_touch(csrc)) || - (rc = mdb_page_touch(cdst))) - return rc; - - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top], key.mv_size); - data.mv_size = 0; - data.mv_data = NULL; - srcpg = 0; - flags = 0; - } else { - srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top]); - mdb_cassert(csrc, !((size_t)srcnode & 1)); - srcpg = NODEPGNO(srcnode); - flags = srcnode->mn_flags; - if (csrc->mc_ki[csrc->mc_top] == 0 && IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { - unsigned int snum = csrc->mc_snum; - MDB_node *s2; - /* must find the lowest key below src */ - rc = mdb_page_search_lowest(csrc); - if (rc) - return rc; - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size); - } else { - s2 = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); - key.mv_size = NODEKSZ(s2); - key.mv_data = NODEKEY(s2); - } - csrc->mc_snum = snum--; - csrc->mc_top = snum; - } else { - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - data.mv_size = NODEDSZ(srcnode); - data.mv_data = NODEDATA(srcnode); - } - mn.mc_xcursor = NULL; - if (IS_BRANCH(cdst->mc_pg[cdst->mc_top]) && cdst->mc_ki[cdst->mc_top] == 0) { - unsigned int snum = cdst->mc_snum; - MDB_node *s2; - MDB_val bkey; - /* must find the lowest key below dst */ - mdb_cursor_copy(cdst, &mn); - rc = mdb_page_search_lowest(&mn); - if (rc) - return rc; - if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { - bkey.mv_size = mn.mc_db->md_pad; - bkey.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, bkey.mv_size); - } else { - s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); - bkey.mv_size = NODEKSZ(s2); - bkey.mv_data = NODEKEY(s2); - } - mn.mc_snum = snum--; - mn.mc_top = snum; - mn.mc_ki[snum] = 0; - rc = mdb_update_key(&mn, &bkey); - if (rc) - return rc; - } - - DPRINTF(("moving %s node %u [%s] on page %"Yu" to node %u on page %"Yu, - IS_LEAF(csrc->mc_pg[csrc->mc_top]) ? "leaf" : "branch", - csrc->mc_ki[csrc->mc_top], - DKEY(&key), - csrc->mc_pg[csrc->mc_top]->mp_pgno, - cdst->mc_ki[cdst->mc_top], cdst->mc_pg[cdst->mc_top]->mp_pgno)); - - /* Add the node to the destination page. - */ - rc = mdb_node_add(cdst, cdst->mc_ki[cdst->mc_top], &key, &data, srcpg, flags); - if (rc != MDB_SUCCESS) - return rc; - - /* Delete the node from the source page. 
- */ - mdb_node_del(csrc, key.mv_size); - - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = csrc->mc_dbi; - MDB_page *mpd, *mps; - - mps = csrc->mc_pg[csrc->mc_top]; - /* If we're adding on the left, bump others up */ - if (fromleft) { - mpd = cdst->mc_pg[csrc->mc_top]; - for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (csrc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top) - continue; - if (m3 != cdst && - m3->mc_pg[csrc->mc_top] == mpd && - m3->mc_ki[csrc->mc_top] >= cdst->mc_ki[csrc->mc_top]) { - m3->mc_ki[csrc->mc_top]++; - } - if (m3 !=csrc && - m3->mc_pg[csrc->mc_top] == mps && - m3->mc_ki[csrc->mc_top] == csrc->mc_ki[csrc->mc_top]) { - m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; - m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; - m3->mc_ki[csrc->mc_top-1]++; - } - if (IS_LEAF(mps)) - XCURSOR_REFRESH(m3, csrc->mc_top, m3->mc_pg[csrc->mc_top]); - } - } else - /* Adding on the right, bump others down */ - { - for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (csrc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == csrc) continue; - if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top) - continue; - if (m3->mc_pg[csrc->mc_top] == mps) { - if (!m3->mc_ki[csrc->mc_top]) { - m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; - m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; - m3->mc_ki[csrc->mc_top-1]--; - } else { - m3->mc_ki[csrc->mc_top]--; - } - if (IS_LEAF(mps)) - XCURSOR_REFRESH(m3, csrc->mc_top, m3->mc_pg[csrc->mc_top]); - } - } - } - } - - /* Update the parent separators. - */ - if (csrc->mc_ki[csrc->mc_top] == 0) { - if (csrc->mc_ki[csrc->mc_top-1] != 0) { - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size); - } else { - srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - DPRINTF(("update separator for source page %"Yu" to [%s]", - csrc->mc_pg[csrc->mc_top]->mp_pgno, DKEY(&key))); - mdb_cursor_copy(csrc, &mn); - mn.mc_snum--; - mn.mc_top--; - /* We want mdb_rebalance to find mn when doing fixups */ - WITH_CURSOR_TRACKING(mn, - rc = mdb_update_key(&mn, &key)); - if (rc) - return rc; - } - if (IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { - MDB_val nullkey; - indx_t ix = csrc->mc_ki[csrc->mc_top]; - nullkey.mv_size = 0; - csrc->mc_ki[csrc->mc_top] = 0; - rc = mdb_update_key(csrc, &nullkey); - csrc->mc_ki[csrc->mc_top] = ix; - mdb_cassert(csrc, rc == MDB_SUCCESS); - } - } - - if (cdst->mc_ki[cdst->mc_top] == 0) { - if (cdst->mc_ki[cdst->mc_top-1] != 0) { - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_data = LEAF2KEY(cdst->mc_pg[cdst->mc_top], 0, key.mv_size); - } else { - srcnode = NODEPTR(cdst->mc_pg[cdst->mc_top], 0); - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - DPRINTF(("update separator for destination page %"Yu" to [%s]", - cdst->mc_pg[cdst->mc_top]->mp_pgno, DKEY(&key))); - mdb_cursor_copy(cdst, &mn); - mn.mc_snum--; - mn.mc_top--; - /* We want mdb_rebalance to find mn when doing fixups */ - WITH_CURSOR_TRACKING(mn, - rc = mdb_update_key(&mn, &key)); - if (rc) - return rc; - } - if (IS_BRANCH(cdst->mc_pg[cdst->mc_top])) { - MDB_val nullkey; - indx_t ix = cdst->mc_ki[cdst->mc_top]; - nullkey.mv_size = 0; - cdst->mc_ki[cdst->mc_top] = 0; - rc = mdb_update_key(cdst, &nullkey); - 
cdst->mc_ki[cdst->mc_top] = ix; - mdb_cassert(cdst, rc == MDB_SUCCESS); - } - } - - return MDB_SUCCESS; -} - -/** Merge one page into another. - * The nodes from the page pointed to by \b csrc will - * be copied to the page pointed to by \b cdst and then - * the \b csrc page will be freed. - * @param[in] csrc Cursor pointing to the source page. - * @param[in] cdst Cursor pointing to the destination page. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst) -{ - MDB_page *psrc, *pdst; - MDB_node *srcnode; - MDB_val key, data; - unsigned nkeys; - int rc; - indx_t i, j; - - psrc = csrc->mc_pg[csrc->mc_top]; - pdst = cdst->mc_pg[cdst->mc_top]; - - DPRINTF(("merging page %"Yu" into %"Yu, psrc->mp_pgno, pdst->mp_pgno)); - - mdb_cassert(csrc, csrc->mc_snum > 1); /* can't merge root page */ - mdb_cassert(csrc, cdst->mc_snum > 1); - - /* Mark dst as dirty. */ - if ((rc = mdb_page_touch(cdst))) - return rc; - - /* get dst page again now that we've touched it. */ - pdst = cdst->mc_pg[cdst->mc_top]; - - /* Move all nodes from src to dst. - */ - j = nkeys = NUMKEYS(pdst); - if (IS_LEAF2(psrc)) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = METADATA(psrc); - for (i = 0; i < NUMKEYS(psrc); i++, j++) { - rc = mdb_node_add(cdst, j, &key, NULL, 0, 0); - if (rc != MDB_SUCCESS) - return rc; - key.mv_data = (char *)key.mv_data + key.mv_size; - } - } else { - for (i = 0; i < NUMKEYS(psrc); i++, j++) { - srcnode = NODEPTR(psrc, i); - if (i == 0 && IS_BRANCH(psrc)) { - MDB_cursor mn; - MDB_node *s2; - mdb_cursor_copy(csrc, &mn); - mn.mc_xcursor = NULL; - /* must find the lowest key below src */ - rc = mdb_page_search_lowest(&mn); - if (rc) - return rc; - if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { - key.mv_size = mn.mc_db->md_pad; - key.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, key.mv_size); - } else { - s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); - key.mv_size = NODEKSZ(s2); - key.mv_data = NODEKEY(s2); - } - } else { - key.mv_size = srcnode->mn_ksize; - key.mv_data = NODEKEY(srcnode); - } - - data.mv_size = NODEDSZ(srcnode); - data.mv_data = NODEDATA(srcnode); - rc = mdb_node_add(cdst, j, &key, &data, NODEPGNO(srcnode), srcnode->mn_flags); - if (rc != MDB_SUCCESS) - return rc; - } - } - - DPRINTF(("dst page %"Yu" now has %u keys (%.1f%% filled)", - pdst->mp_pgno, NUMKEYS(pdst), - (float)PAGEFILL(cdst->mc_txn->mt_env, pdst) / 10)); - - /* Unlink the src page from parent and add to free list. - */ - csrc->mc_top--; - mdb_node_del(csrc, 0); - if (csrc->mc_ki[csrc->mc_top] == 0) { - key.mv_size = 0; - rc = mdb_update_key(csrc, &key); - if (rc) { - csrc->mc_top++; - return rc; - } - } - csrc->mc_top++; - - psrc = csrc->mc_pg[csrc->mc_top]; - /* If not operating on FreeDB, allow this page to be reused - * in this txn. Otherwise just add to free list. 
- */
-	rc = mdb_page_loose(csrc, psrc);
-	if (rc)
-		return rc;
-	if (IS_LEAF(psrc))
-		csrc->mc_db->md_leaf_pages--;
-	else
-		csrc->mc_db->md_branch_pages--;
-	{
-		/* Adjust other cursors pointing to mp */
-		MDB_cursor *m2, *m3;
-		MDB_dbi dbi = csrc->mc_dbi;
-		unsigned int top = csrc->mc_top;
-
-		for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
-			if (csrc->mc_flags & C_SUB)
-				m3 = &m2->mc_xcursor->mx_cursor;
-			else
-				m3 = m2;
-			if (m3 == csrc) continue;
-			if (m3->mc_snum < csrc->mc_snum) continue;
-			if (m3->mc_pg[top] == psrc) {
-				m3->mc_pg[top] = pdst;
-				m3->mc_ki[top] += nkeys;
-				m3->mc_ki[top-1] = cdst->mc_ki[top-1];
-			} else if (m3->mc_pg[top-1] == csrc->mc_pg[top-1] &&
-				m3->mc_ki[top-1] > csrc->mc_ki[top-1]) {
-				m3->mc_ki[top-1]--;
-			}
-			if (IS_LEAF(psrc))
-				XCURSOR_REFRESH(m3, top, m3->mc_pg[top]);
-		}
-	}
-	{
-		unsigned int snum = cdst->mc_snum;
-		uint16_t depth = cdst->mc_db->md_depth;
-		mdb_cursor_pop(cdst);
-		rc = mdb_rebalance(cdst);
-		/* Did the tree height change? */
-		if (depth != cdst->mc_db->md_depth)
-			snum += cdst->mc_db->md_depth - depth;
-		cdst->mc_snum = snum;
-		cdst->mc_top = snum-1;
-	}
-	return rc;
-}
-
-/** Copy the contents of a cursor.
- * @param[in] csrc The cursor to copy from.
- * @param[out] cdst The cursor to copy to.
- */
-static void
-mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst)
-{
-	unsigned int i;
-
-	cdst->mc_txn = csrc->mc_txn;
-	cdst->mc_dbi = csrc->mc_dbi;
-	cdst->mc_db  = csrc->mc_db;
-	cdst->mc_dbx = csrc->mc_dbx;
-	cdst->mc_snum = csrc->mc_snum;
-	cdst->mc_top = csrc->mc_top;
-	cdst->mc_flags = csrc->mc_flags;
-	MC_SET_OVPG(cdst, MC_OVPG(csrc));
-
-	for (i=0; i<csrc->mc_snum; i++) {
-		cdst->mc_pg[i] = csrc->mc_pg[i];
-		cdst->mc_ki[i] = csrc->mc_ki[i];
-	}
-}
-
-/** Rebalance the tree after a delete operation.
- * @param[in] mc Cursor pointing to the page where rebalancing
- * should begin.
- * @return 0 on success, non-zero on failure.
- */
-static int
-mdb_rebalance(MDB_cursor *mc)
-{
-	MDB_node *node;
-	int rc, fromleft;
-	unsigned int ptop, minkeys, thresh;
-	MDB_cursor mn;
-	indx_t oldki;
-
-	if (IS_BRANCH(mc->mc_pg[mc->mc_top])) {
-		minkeys = 2;
-		thresh = 1;
-	} else {
-		minkeys = 1;
-		thresh = FILL_THRESHOLD;
-	}
-	DPRINTF(("rebalancing %s page %"Yu" (has %u keys, %.1f%% full)",
-	    IS_LEAF(mc->mc_pg[mc->mc_top]) ? "leaf" : "branch",
-	    mdb_dbg_pgno(mc->mc_pg[mc->mc_top]), NUMKEYS(mc->mc_pg[mc->mc_top]),
-	    (float)PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) / 10));
-
-	if (PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) >= thresh &&
-		NUMKEYS(mc->mc_pg[mc->mc_top]) >= minkeys) {
-		DPRINTF(("no need to rebalance page %"Yu", above fill threshold",
-			mdb_dbg_pgno(mc->mc_pg[mc->mc_top])));
-		return MDB_SUCCESS;
-	}
-
-	if (mc->mc_snum < 2) {
-		MDB_page *mp = mc->mc_pg[0];
-		if (IS_SUBP(mp)) {
-			DPUTS("Can't rebalance a subpage, ignoring");
-			return MDB_SUCCESS;
-		}
-		if (NUMKEYS(mp) == 0) {
-			DPUTS("tree is completely empty");
-			mc->mc_db->md_root = P_INVALID;
-			mc->mc_db->md_depth = 0;
-			mc->mc_db->md_leaf_pages = 0;
-			rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno);
-			if (rc)
-				return rc;
-			/* Adjust cursors pointing to mp */
-			mc->mc_snum = 0;
-			mc->mc_top = 0;
-			mc->mc_flags &= ~C_INITIALIZED;
-			{
-				MDB_cursor *m2, *m3;
-				MDB_dbi dbi = mc->mc_dbi;
-
-				for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
-					if (mc->mc_flags & C_SUB)
-						m3 = &m2->mc_xcursor->mx_cursor;
-					else
-						m3 = m2;
-					if (!(m3->mc_flags & C_INITIALIZED) || (m3->mc_snum < mc->mc_snum))
-						continue;
-					if (m3->mc_pg[0] == mp) {
-						m3->mc_snum = 0;
-						m3->mc_top = 0;
-						m3->mc_flags &= ~C_INITIALIZED;
-					}
-				}
-			}
-		} else if (IS_BRANCH(mp) && NUMKEYS(mp) == 1) {
-			int i;
-			DPUTS("collapsing root page!");
-			rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno);
-			if (rc)
-				return rc;
-			mc->mc_db->md_root = NODEPGNO(NODEPTR(mp, 0));
-			rc = mdb_page_get(mc, mc->mc_db->md_root, &mc->mc_pg[0], NULL);
-			if (rc)
-				return rc;
-			mc->mc_db->md_depth--;
-			mc->mc_db->md_branch_pages--;
-			mc->mc_ki[0] = mc->mc_ki[1];
-			for (i = 1; i<mc->mc_db->md_depth; i++) {
-				mc->mc_pg[i] = mc->mc_pg[i+1];
-				mc->mc_ki[i] = mc->mc_ki[i+1];
-			}
-			{
-				/* Adjust other cursors pointing to mp */
-				MDB_cursor *m2, *m3;
-				MDB_dbi dbi = mc->mc_dbi;
-
-				for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
-					if (mc->mc_flags & C_SUB)
-						m3 = &m2->mc_xcursor->mx_cursor;
-					else
-						m3 = m2;
-					if (m3 == mc) continue;
-					if (!(m3->mc_flags & C_INITIALIZED))
-						continue;
-					if (m3->mc_pg[0] == mp) {
-						for (i=0; i<mc->mc_db->md_depth; i++) {
-							m3->mc_pg[i] = m3->mc_pg[i+1];
-							m3->mc_ki[i] = m3->mc_ki[i+1];
-						}
-						m3->mc_snum--;
-						m3->mc_top--;
-					}
-				}
-			}
-		} else
-			DPUTS("root page doesn't need rebalancing");
-		return MDB_SUCCESS;
-	}
-
-	/* The parent (branch page) must have at least 2 pointers,
-	 * otherwise the tree is invalid.
-	 */
-	ptop = mc->mc_top-1;
-	mdb_cassert(mc, NUMKEYS(mc->mc_pg[ptop]) > 1);
-
-	/* Leaf page fill factor is below the threshold.
-	 * Try to move keys from left or right neighbor, or
-	 * merge with a neighbor page.
-	 */
-
-	/* Find neighbors.
-	 */
-	mdb_cursor_copy(mc, &mn);
-	mn.mc_xcursor = NULL;
-
-	oldki = mc->mc_ki[mc->mc_top];
-	if (mc->mc_ki[ptop] == 0) {
-		/* We're the leftmost leaf in our parent.
-		 */
-		DPUTS("reading right neighbor");
-		mn.mc_ki[ptop]++;
-		node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]);
-		rc = mdb_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL);
-		if (rc)
-			return rc;
-		mn.mc_ki[mn.mc_top] = 0;
-		mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]);
-		fromleft = 0;
-	} else {
-		/* There is at least one neighbor to the left.
- */ - DPUTS("reading left neighbor"); - mn.mc_ki[ptop]--; - node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); - rc = mdb_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL); - if (rc) - return rc; - mn.mc_ki[mn.mc_top] = NUMKEYS(mn.mc_pg[mn.mc_top]) - 1; - mc->mc_ki[mc->mc_top] = 0; - fromleft = 1; - } - - DPRINTF(("found neighbor page %"Yu" (%u keys, %.1f%% full)", - mn.mc_pg[mn.mc_top]->mp_pgno, NUMKEYS(mn.mc_pg[mn.mc_top]), - (float)PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) / 10)); - - /* If the neighbor page is above threshold and has enough keys, - * move one key from it. Otherwise we should try to merge them. - * (A branch page must never have less than 2 keys.) - */ - if (PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) >= thresh && NUMKEYS(mn.mc_pg[mn.mc_top]) > minkeys) { - rc = mdb_node_move(&mn, mc, fromleft); - if (fromleft) { - /* if we inserted on left, bump position up */ - oldki++; - } - } else { - if (!fromleft) { - rc = mdb_page_merge(&mn, mc); - } else { - oldki += NUMKEYS(mn.mc_pg[mn.mc_top]); - mn.mc_ki[mn.mc_top] += mc->mc_ki[mn.mc_top] + 1; - /* We want mdb_rebalance to find mn when doing fixups */ - WITH_CURSOR_TRACKING(mn, - rc = mdb_page_merge(mc, &mn)); - mdb_cursor_copy(&mn, mc); - } - mc->mc_flags &= ~C_EOF; - } - mc->mc_ki[mc->mc_top] = oldki; - return rc; -} - -/** Complete a delete operation started by #mdb_cursor_del(). */ -static int -mdb_cursor_del0(MDB_cursor *mc) -{ - int rc; - MDB_page *mp; - indx_t ki; - unsigned int nkeys; - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - - ki = mc->mc_ki[mc->mc_top]; - mp = mc->mc_pg[mc->mc_top]; - mdb_node_del(mc, mc->mc_db->md_pad); - mc->mc_db->md_entries--; - { - /* Adjust other cursors pointing to mp */ - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; - if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED)) - continue; - if (m3 == mc || m3->mc_snum < mc->mc_snum) - continue; - if (m3->mc_pg[mc->mc_top] == mp) { - if (m3->mc_ki[mc->mc_top] == ki) { - m3->mc_flags |= C_DEL; - if (mc->mc_db->md_flags & MDB_DUPSORT) { - /* Sub-cursor referred into dataset which is gone */ - m3->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - } - continue; - } else if (m3->mc_ki[mc->mc_top] > ki) { - m3->mc_ki[mc->mc_top]--; - } - XCURSOR_REFRESH(m3, mc->mc_top, mp); - } - } - } - rc = mdb_rebalance(mc); - - if (rc == MDB_SUCCESS) { - /* DB is totally empty now, just bail out. - * Other cursors adjustments were already done - * by mdb_rebalance and aren't needed here. - */ - if (!mc->mc_snum) - return rc; - - mp = mc->mc_pg[mc->mc_top]; - nkeys = NUMKEYS(mp); - - /* Adjust other cursors pointing to mp */ - for (m2 = mc->mc_txn->mt_cursors[dbi]; !rc && m2; m2=m2->mc_next) { - m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; - if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED)) - continue; - if (m3->mc_snum < mc->mc_snum) - continue; - if (m3->mc_pg[mc->mc_top] == mp) { - /* if m3 points past last node in page, find next sibling */ - if (m3->mc_ki[mc->mc_top] >= mc->mc_ki[mc->mc_top]) { - if (m3->mc_ki[mc->mc_top] >= nkeys) { - rc = mdb_cursor_sibling(m3, 1); - if (rc == MDB_NOTFOUND) { - m3->mc_flags |= C_EOF; - rc = MDB_SUCCESS; - continue; - } - } - if (mc->mc_db->md_flags & MDB_DUPSORT) { - MDB_node *node = NODEPTR(m3->mc_pg[m3->mc_top], m3->mc_ki[m3->mc_top]); - /* If this node has dupdata, it may need to be reinited - * because its data has moved. - * If the xcursor was not initd it must be reinited. 
- * Else if node points to a subDB, nothing is needed. - * Else (xcursor was initd, not a subDB) needs mc_pg[0] reset. - */ - if (node->mn_flags & F_DUPDATA) { - if (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - if (!(node->mn_flags & F_SUBDATA)) - m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); - } else { - mdb_xcursor_init1(m3, node); - m3->mc_xcursor->mx_cursor.mc_flags |= C_DEL; - } - } - } - } - } - } - mc->mc_flags |= C_DEL; - } - - if (rc) - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_del(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data) -{ - if (!key || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) - return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; - - if (!F_ISSET(txn->mt_dbs[dbi].md_flags, MDB_DUPSORT)) { - /* must ignore any data */ - data = NULL; - } - - return mdb_del0(txn, dbi, key, data, 0); -} - -static int -mdb_del0(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data, unsigned flags) -{ - MDB_cursor mc; - MDB_xcursor mx; - MDB_cursor_op op; - MDB_val rdata, *xdata; - int rc, exact = 0; - DKBUF; - - DPRINTF(("====> delete db %u key [%s]", dbi, DKEY(key))); - - mdb_cursor_init(&mc, txn, dbi, &mx); - - if (data) { - op = MDB_GET_BOTH; - rdata = *data; - xdata = &rdata; - } else { - op = MDB_SET; - xdata = NULL; - flags |= MDB_NODUPDATA; - } - rc = mdb_cursor_set(&mc, key, xdata, op, &exact); - if (rc == 0) { - /* let mdb_page_split know about this cursor if needed: - * delete will trigger a rebalance; if it needs to move - * a node from one page to another, it will have to - * update the parent's separator key(s). If the new sepkey - * is larger than the current one, the parent page may - * run out of space, triggering a split. We need this - * cursor to be consistent until the end of the rebalance. - */ - mc.mc_next = txn->mt_cursors[dbi]; - txn->mt_cursors[dbi] = &mc; - rc = mdb_cursor_del(&mc, flags); - txn->mt_cursors[dbi] = mc.mc_next; - } - return rc; -} - -/** Split a page and insert a new node. - * Set #MDB_TXN_ERROR on failure. - * @param[in,out] mc Cursor pointing to the page and desired insertion index. - * The cursor will be updated to point to the actual page and index where - * the node got inserted after the split. - * @param[in] newkey The key for the newly inserted node. - * @param[in] newdata The data for the newly inserted node. - * @param[in] newpgno The page number, if the new node is a branch node. - * @param[in] nflags The #NODE_ADD_FLAGS for the new node. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, pgno_t newpgno, - unsigned int nflags) -{ - unsigned int flags; - int rc = MDB_SUCCESS, new_root = 0, did_split = 0; - indx_t newindx; - pgno_t pgno = 0; - int i, j, split_indx, nkeys, pmax; - MDB_env *env = mc->mc_txn->mt_env; - MDB_node *node; - MDB_val sepkey, rkey, xdata, *rdata = &xdata; - MDB_page *copy = NULL; - MDB_page *mp, *rp, *pp; - int ptop; - MDB_cursor mn; - DKBUF; - - mp = mc->mc_pg[mc->mc_top]; - newindx = mc->mc_ki[mc->mc_top]; - nkeys = NUMKEYS(mp); - - DPRINTF(("-----> splitting %s page %"Yu" and adding [%s] at index %i/%i", - IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno, - DKEY(newkey), mc->mc_ki[mc->mc_top], nkeys)); - - /* Create a right sibling. 
*/
-	if ((rc = mdb_page_new(mc, mp->mp_flags, 1, &rp)))
-		return rc;
-	rp->mp_pad = mp->mp_pad;
-	DPRINTF(("new right sibling: page %"Yu, rp->mp_pgno));
-
-	/* Usually when splitting the root page, the cursor
-	 * height is 1. But when called from mdb_update_key,
-	 * the cursor height may be greater because it walks
-	 * up the stack while finding the branch slot to update.
-	 */
-	if (mc->mc_top < 1) {
-		if ((rc = mdb_page_new(mc, P_BRANCH, 1, &pp)))
-			goto done;
-		/* shift current top to make room for new parent */
-		for (i=mc->mc_snum; i>0; i--) {
-			mc->mc_pg[i] = mc->mc_pg[i-1];
-			mc->mc_ki[i] = mc->mc_ki[i-1];
-		}
-		mc->mc_pg[0] = pp;
-		mc->mc_ki[0] = 0;
-		mc->mc_db->md_root = pp->mp_pgno;
-		DPRINTF(("root split! new root = %"Yu, pp->mp_pgno));
-		new_root = mc->mc_db->md_depth++;
-
-		/* Add left (implicit) pointer. */
-		if ((rc = mdb_node_add(mc, 0, NULL, NULL, mp->mp_pgno, 0)) != MDB_SUCCESS) {
-			/* undo the pre-push */
-			mc->mc_pg[0] = mc->mc_pg[1];
-			mc->mc_ki[0] = mc->mc_ki[1];
-			mc->mc_db->md_root = mp->mp_pgno;
-			mc->mc_db->md_depth--;
-			goto done;
-		}
-		mc->mc_snum++;
-		mc->mc_top++;
-		ptop = 0;
-	} else {
-		ptop = mc->mc_top-1;
-		DPRINTF(("parent branch page is %"Yu, mc->mc_pg[ptop]->mp_pgno));
-	}
-
-	mdb_cursor_copy(mc, &mn);
-	mn.mc_xcursor = NULL;
-	mn.mc_pg[mn.mc_top] = rp;
-	mn.mc_ki[ptop] = mc->mc_ki[ptop]+1;
-
-	if (nflags & MDB_APPEND) {
-		mn.mc_ki[mn.mc_top] = 0;
-		sepkey = *newkey;
-		split_indx = newindx;
-		nkeys = 0;
-	} else {
-
-		split_indx = (nkeys+1) / 2;
-
-		if (IS_LEAF2(rp)) {
-			char *split, *ins;
-			int x;
-			unsigned int lsize, rsize, ksize;
-			/* Move half of the keys to the right sibling */
-			x = mc->mc_ki[mc->mc_top] - split_indx;
-			ksize = mc->mc_db->md_pad;
-			split = LEAF2KEY(mp, split_indx, ksize);
-			rsize = (nkeys - split_indx) * ksize;
-			lsize = (nkeys - split_indx) * sizeof(indx_t);
-			mp->mp_lower -= lsize;
-			rp->mp_lower += lsize;
-			mp->mp_upper += rsize - lsize;
-			rp->mp_upper -= rsize - lsize;
-			sepkey.mv_size = ksize;
-			if (newindx == split_indx) {
-				sepkey.mv_data = newkey->mv_data;
-			} else {
-				sepkey.mv_data = split;
-			}
-			if (x<0) {
-				ins = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], ksize);
-				memcpy(rp->mp_ptrs, split, rsize);
-				sepkey.mv_data = rp->mp_ptrs;
-				memmove(ins+ksize, ins, (split_indx - mc->mc_ki[mc->mc_top]) * ksize);
-				memcpy(ins, newkey->mv_data, ksize);
-				mp->mp_lower += sizeof(indx_t);
-				mp->mp_upper -= ksize - sizeof(indx_t);
-			} else {
-				if (x)
-					memcpy(rp->mp_ptrs, split, x * ksize);
-				ins = LEAF2KEY(rp, x, ksize);
-				memcpy(ins, newkey->mv_data, ksize);
-				memcpy(ins+ksize, split + x * ksize, rsize - x * ksize);
-				rp->mp_lower += sizeof(indx_t);
-				rp->mp_upper -= ksize - sizeof(indx_t);
-				mc->mc_ki[mc->mc_top] = x;
-			}
-		} else {
-			int psize, nsize, k;
-			/* Maximum free space in an empty page */
-			pmax = env->me_psize - PAGEHDRSZ;
-			if (IS_LEAF(mp))
-				nsize = mdb_leaf_size(env, newkey, newdata);
-			else
-				nsize = mdb_branch_size(env, newkey);
-			nsize = EVEN(nsize);
-
-			/* grab a page to hold a temporary copy */
-			copy = mdb_page_malloc(mc->mc_txn, 1);
-			if (copy == NULL) {
-				rc = ENOMEM;
-				goto done;
-			}
-			copy->mp_pgno = mp->mp_pgno;
-			copy->mp_flags = mp->mp_flags;
-			copy->mp_lower = (PAGEHDRSZ-PAGEBASE);
-			copy->mp_upper = env->me_psize - PAGEBASE;
-
-			/* prepare to insert */
-			for (i=0, j=0; i<nkeys; i++) {
-				if (i == newindx) {
-					copy->mp_ptrs[j++] = 0;
-				}
-				copy->mp_ptrs[j++] = mp->mp_ptrs[i];
-			}
-
-			/* When items are relatively large the split point needs
-			 * to be checked, because being off-by-one will make the
-			 * difference between success or failure in mdb_node_add.
-			 *
-			 * It's also relevant if a page happens to be laid out
-			 * such that one half of its nodes are all "small" and
-			 * the other half of its nodes are "large." If the new
-			 * item is also "large" and falls on the half with
-			 * "large" nodes, it also may not fit.
-			 *
-			 * As a final tweak, if the new item goes on the last
-			 * spot on the page (and thus, onto the new page), bias
-			 * the split so the new page is emptier than the old page.
-			 * This yields better packing during sequential inserts.
-			 */
-			if (nkeys < 20 || nsize > pmax/16 || newindx >= nkeys) {
-				/* Find split point */
-				psize = 0;
-				if (newindx <= split_indx || newindx >= nkeys) {
-					i = 0; j = 1;
-					k = newindx >= nkeys ? nkeys : split_indx+1+IS_LEAF(mp);
-				} else {
-					i = nkeys; j = -1;
-					k = split_indx-1;
-				}
-				for (; i!=k; i+=j) {
-					if (i == newindx) {
-						psize += nsize;
-						node = NULL;
-					} else {
-						node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE);
-						psize += NODESIZE + NODEKSZ(node) + sizeof(indx_t);
-						if (IS_LEAF(mp)) {
-							if (F_ISSET(node->mn_flags, F_BIGDATA))
-								psize += sizeof(pgno_t);
-							else
-								psize += NODEDSZ(node);
-						}
-						psize = EVEN(psize);
-					}
-					if (psize > pmax || i == k-j) {
-						split_indx = i + (j<0);
-						break;
-					}
-				}
-			}
-			if (split_indx == newindx) {
-				sepkey.mv_size = newkey->mv_size;
-				sepkey.mv_data = newkey->mv_data;
-			} else {
-				node = (MDB_node *)((char *)mp + copy->mp_ptrs[split_indx] + PAGEBASE);
-				sepkey.mv_size = node->mn_ksize;
-				sepkey.mv_data = NODEKEY(node);
-			}
-		}
-	}
-
-	DPRINTF(("separator is %d [%s]", split_indx, DKEY(&sepkey)));
-
-	/* Copy separator key to the parent.
-	 */
-	if (SIZELEFT(mn.mc_pg[ptop]) < mdb_branch_size(env, &sepkey)) {
-		int snum = mc->mc_snum;
-		mn.mc_snum--;
-		mn.mc_top--;
-		did_split = 1;
-		/* We want other splits to find mn when doing fixups */
-		WITH_CURSOR_TRACKING(mn,
-			rc = mdb_page_split(&mn, &sepkey, NULL, rp->mp_pgno, 0));
-		if (rc)
-			goto done;
-
-		/* root split? */
-		if (mc->mc_snum > snum) {
-			ptop++;
-		}
-		/* Right page might now have changed parent.
-		 * Check if left page also changed parent.
-		 */
-		if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
-		    mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
-			for (i=0; i<ptop; i++) {
-				mc->mc_pg[i] = mn.mc_pg[i];
-				mc->mc_ki[i] = mn.mc_ki[i];
-			}
-			mc->mc_pg[ptop] = mn.mc_pg[ptop];
-			if (mn.mc_ki[ptop]) {
-				mc->mc_ki[ptop] = mn.mc_ki[ptop] - 1;
-			} else {
-				/* find right page's left sibling */
-				mc->mc_ki[ptop] = mn.mc_ki[ptop];
-				rc = mdb_cursor_sibling(mc, 0);
-			}
-		}
-	} else {
-		mn.mc_top--;
-		rc = mdb_node_add(&mn, mn.mc_ki[ptop], &sepkey, NULL, rp->mp_pgno, 0);
-		mn.mc_top++;
-	}
-	if (rc != MDB_SUCCESS) {
-		if (rc == MDB_NOTFOUND) /* improper mdb_cursor_sibling() result */
-			rc = MDB_PROBLEM;
-		goto done;
-	}
-	if (nflags & MDB_APPEND) {
-		mc->mc_pg[mc->mc_top] = rp;
-		mc->mc_ki[mc->mc_top] = 0;
-		rc = mdb_node_add(mc, 0, newkey, newdata, newpgno, nflags);
-		if (rc)
-			goto done;
-		for (i=0; i<mc->mc_top; i++)
-			mc->mc_ki[i] = mn.mc_ki[i];
-	} else if (!IS_LEAF2(mp)) {
-		/* Move nodes */
-		mc->mc_pg[mc->mc_top] = rp;
-		i = split_indx;
-		j = 0;
-		do {
-			if (i == newindx) {
-				rkey.mv_data = newkey->mv_data;
-				rkey.mv_size = newkey->mv_size;
-				if (IS_LEAF(mp)) {
-					rdata = newdata;
-				} else
-					pgno = newpgno;
-				flags = nflags;
-				/* Update index for the new key.
-				 */
-				mc->mc_ki[mc->mc_top] = j;
-			} else {
-				node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE);
-				rkey.mv_data = NODEKEY(node);
-				rkey.mv_size = node->mn_ksize;
-				if (IS_LEAF(mp)) {
-					xdata.mv_data = NODEDATA(node);
-					xdata.mv_size = NODEDSZ(node);
-					rdata = &xdata;
-				} else
-					pgno = NODEPGNO(node);
-				flags = node->mn_flags;
-			}
-
-			if (!IS_LEAF(mp) && j == 0) {
-				/* First branch index doesn't need key data. */
-				rkey.mv_size = 0;
-			}
-
-			rc = mdb_node_add(mc, j, &rkey, rdata, pgno, flags);
-			if (rc)
-				goto done;
-			if (i == nkeys) {
-				i = 0;
-				j = 0;
-				mc->mc_pg[mc->mc_top] = copy;
-			} else {
-				i++;
-				j++;
-			}
-		} while (i != split_indx);
-
-		nkeys = NUMKEYS(copy);
-		for (i=0; i<nkeys; i++)
-			mp->mp_ptrs[i] = copy->mp_ptrs[i];
-		mp->mp_lower = copy->mp_lower;
-		mp->mp_upper = copy->mp_upper;
-		memcpy(NODEPTR(mp, nkeys-1), NODEPTR(copy, nkeys-1),
-			env->me_psize - copy->mp_upper - PAGEBASE);
-
-		/* reset back to original page */
-		if (newindx < split_indx) {
-			mc->mc_pg[mc->mc_top] = mp;
-		} else {
-			mc->mc_pg[mc->mc_top] = rp;
-			mc->mc_ki[ptop]++;
-			/* Make sure mc_ki is still valid.
-			 */
-			if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
-			    mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
-				for (i=0; i<=ptop; i++) {
-					mc->mc_pg[i] = mn.mc_pg[i];
-					mc->mc_ki[i] = mn.mc_ki[i];
-				}
-			}
-		}
-		if (nflags & MDB_RESERVE) {
-			node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
-			if (!(node->mn_flags & F_BIGDATA))
-				newdata->mv_data = NODEDATA(node);
-		}
-	} else {
-		if (newindx >= split_indx) {
-			mc->mc_pg[mc->mc_top] = rp;
-			mc->mc_ki[ptop]++;
-			/* Make sure mc_ki is still valid.
-			 */
-			if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
-			    mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
-				for (i=0; i<=ptop; i++) {
-					mc->mc_pg[i] = mn.mc_pg[i];
-					mc->mc_ki[i] = mn.mc_ki[i];
-				}
-			}
-		}
-	}
-
-	{
-		/* Adjust other cursors pointing to mp */
-		MDB_cursor *m2, *m3;
-		MDB_dbi dbi = mc->mc_dbi;
-		nkeys = NUMKEYS(mp);
-
-		for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
-			if (mc->mc_flags & C_SUB)
-				m3 = &m2->mc_xcursor->mx_cursor;
-			else
-				m3 = m2;
-			if (m3 == mc)
-				continue;
-			if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED))
-				continue;
-			if (new_root) {
-				int k;
-				/* sub cursors may be on different DB */
-				if (m3->mc_pg[0] != mp)
-					continue;
-				/* root split */
-				for (k=new_root; k>=0; k--) {
-					m3->mc_ki[k+1] = m3->mc_ki[k];
-					m3->mc_pg[k+1] = m3->mc_pg[k];
-				}
-				if (m3->mc_ki[0] >= nkeys) {
-					m3->mc_ki[0] = 1;
-				} else {
-					m3->mc_ki[0] = 0;
-				}
-				m3->mc_pg[0] = mc->mc_pg[0];
-				m3->mc_snum++;
-				m3->mc_top++;
-			}
-			if (m3->mc_top >= mc->mc_top && m3->mc_pg[mc->mc_top] == mp) {
-				if (m3->mc_ki[mc->mc_top] >= newindx && !(nflags & MDB_SPLIT_REPLACE))
-					m3->mc_ki[mc->mc_top]++;
-				if (m3->mc_ki[mc->mc_top] >= nkeys) {
-					m3->mc_pg[mc->mc_top] = rp;
-					m3->mc_ki[mc->mc_top] -= nkeys;
-					for (i=0; i<mc->mc_top; i++) {
-						m3->mc_ki[i] = mn.mc_ki[i];
-						m3->mc_pg[i] = mn.mc_pg[i];
-					}
-				}
-			} else if (!did_split && m3->mc_top >= ptop && m3->mc_pg[ptop] == mc->mc_pg[ptop] &&
-				m3->mc_ki[ptop] >= mc->mc_ki[ptop]) {
-				m3->mc_ki[ptop]++;
-			}
-			if (IS_LEAF(mp))
-				XCURSOR_REFRESH(m3, mc->mc_top, m3->mc_pg[mc->mc_top]);
-		}
-	}
-	DPRINTF(("mp left: %d, rp left: %d", SIZELEFT(mp), SIZELEFT(rp)));
-
-done:
-	if (copy)	/* tmp page */
-		mdb_page_free(env, copy);
-	if (rc)
-		mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
-	return rc;
-}
-
-int
-mdb_put(MDB_txn *txn, MDB_dbi dbi,
-    MDB_val *key, MDB_val *data, unsigned int flags)
-{
-	MDB_cursor mc;
-	MDB_xcursor mx;
-	int rc;
-
-	if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
return EINVAL; - - if (flags & ~(MDB_NOOVERWRITE|MDB_NODUPDATA|MDB_RESERVE|MDB_APPEND|MDB_APPENDDUP)) - return EINVAL; - - if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) - return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; - - mdb_cursor_init(&mc, txn, dbi, &mx); - mc.mc_next = txn->mt_cursors[dbi]; - txn->mt_cursors[dbi] = &mc; - rc = mdb_cursor_put(&mc, key, data, flags); - txn->mt_cursors[dbi] = mc.mc_next; - return rc; -} - -#ifndef MDB_WBUF -#define MDB_WBUF (1024*1024) -#endif -#define MDB_EOF 0x10 /**< #mdb_env_copyfd1() is done reading */ - - /** State needed for a double-buffering compacting copy. */ -typedef struct mdb_copy { - MDB_env *mc_env; - MDB_txn *mc_txn; - pthread_mutex_t mc_mutex; - pthread_cond_t mc_cond; /**< Condition variable for #mc_new */ - char *mc_wbuf[2]; - char *mc_over[2]; - int mc_wlen[2]; - int mc_olen[2]; - pgno_t mc_next_pgno; - HANDLE mc_fd; - int mc_toggle; /**< Buffer number in provider */ - int mc_new; /**< (0-2 buffers to write) | (#MDB_EOF at end) */ - /** Error code. Never cleared if set. Both threads can set nonzero - * to fail the copy. Not mutex-protected, LMDB expects atomic int. - */ - volatile int mc_error; -} mdb_copy; - - /** Dedicated writer thread for compacting copy. */ -static THREAD_RET ESECT CALL_CONV -mdb_env_copythr(void *arg) -{ - mdb_copy *my = arg; - char *ptr; - int toggle = 0, wsize, rc; -#ifdef _WIN32 - DWORD len; -#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL) -#else - int len; -#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0) -#ifdef SIGPIPE - sigset_t set; - sigemptyset(&set); - sigaddset(&set, SIGPIPE); - if ((rc = pthread_sigmask(SIG_BLOCK, &set, NULL)) != 0) - my->mc_error = rc; -#endif -#endif - - pthread_mutex_lock(&my->mc_mutex); - for(;;) { - while (!my->mc_new) - pthread_cond_wait(&my->mc_cond, &my->mc_mutex); - if (my->mc_new == 0 + MDB_EOF) /* 0 buffers, just EOF */ - break; - wsize = my->mc_wlen[toggle]; - ptr = my->mc_wbuf[toggle]; -again: - rc = MDB_SUCCESS; - while (wsize > 0 && !my->mc_error) { - DO_WRITE(rc, my->mc_fd, ptr, wsize, len); - if (!rc) { - rc = ErrCode(); -#if defined(SIGPIPE) && !defined(_WIN32) - if (rc == EPIPE) { - /* Collect the pending SIGPIPE, otherwise at least OS X - * gives it to the process on thread-exit (ITS#8504). - */ - int tmp; - sigwait(&set, &tmp); - } -#endif - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - wsize -= len; - continue; - } else { - rc = EIO; - break; - } - } - if (rc) { - my->mc_error = rc; - } - /* If there's an overflow page tail, write it too */ - if (my->mc_olen[toggle]) { - wsize = my->mc_olen[toggle]; - ptr = my->mc_over[toggle]; - my->mc_olen[toggle] = 0; - goto again; - } - my->mc_wlen[toggle] = 0; - toggle ^= 1; - /* Return the empty buffer to provider */ - my->mc_new--; - pthread_cond_signal(&my->mc_cond); - } - pthread_mutex_unlock(&my->mc_mutex); - return (THREAD_RET)0; -#undef DO_WRITE -} - - /** Give buffer and/or #MDB_EOF to writer thread, await unused buffer. - * - * @param[in] my control structure. - * @param[in] adjust (1 to hand off 1 buffer) | (MDB_EOF when ending). 
- */
-static int ESECT
-mdb_env_cthr_toggle(mdb_copy *my, int adjust)
-{
-	pthread_mutex_lock(&my->mc_mutex);
-	my->mc_new += adjust;
-	pthread_cond_signal(&my->mc_cond);
-	while (my->mc_new & 2)		/* both buffers in use */
-		pthread_cond_wait(&my->mc_cond, &my->mc_mutex);
-	pthread_mutex_unlock(&my->mc_mutex);
-
-	my->mc_toggle ^= (adjust & 1);
-	/* Both threads reset mc_wlen, to be safe from threading errors */
-	my->mc_wlen[my->mc_toggle] = 0;
-	return my->mc_error;
-}
-
-	/** Depth-first tree traversal for compacting copy.
-	 * @param[in] my control structure.
-	 * @param[in,out] pg database root.
-	 * @param[in] flags includes #F_DUPDATA if it is a sorted-duplicate sub-DB.
-	 */
-static int ESECT
-mdb_env_cwalk(mdb_copy *my, pgno_t *pg, int flags)
-{
-	MDB_cursor mc = {0};
-	MDB_node *ni;
-	MDB_page *mo, *mp, *leaf;
-	char *buf, *ptr;
-	int rc, toggle;
-	unsigned int i;
-
-	/* Empty DB, nothing to do */
-	if (*pg == P_INVALID)
-		return MDB_SUCCESS;
-
-	mc.mc_snum = 1;
-	mc.mc_txn = my->mc_txn;
-	mc.mc_flags = my->mc_txn->mt_flags & (C_ORIG_RDONLY|C_WRITEMAP);
-
-	rc = mdb_page_get(&mc, *pg, &mc.mc_pg[0], NULL);
-	if (rc)
-		return rc;
-	rc = mdb_page_search_root(&mc, NULL, MDB_PS_FIRST);
-	if (rc)
-		return rc;
-
-	/* Make cursor pages writable */
-	buf = ptr = malloc(my->mc_env->me_psize * mc.mc_snum);
-	if (buf == NULL)
-		return ENOMEM;
-
-	for (i=0; i<mc.mc_snum; i++) {
-		mdb_page_copy((MDB_page *)ptr, mc.mc_pg[i], my->mc_env->me_psize);
-		mc.mc_pg[i] = (MDB_page *)ptr;
-		ptr += my->mc_env->me_psize;
-	}
-
-	/* This is writable space for a leaf page. Usually not needed. */
-	leaf = (MDB_page *)ptr;
-
-	toggle = my->mc_toggle;
-	while (mc.mc_snum > 0) {
-		unsigned n;
-		mp = mc.mc_pg[mc.mc_top];
-		n = NUMKEYS(mp);
-
-		if (IS_LEAF(mp)) {
-			if (!IS_LEAF2(mp) && !(flags & F_DUPDATA)) {
-				for (i=0; i<n; i++) {
-					ni = NODEPTR(mp, i);
-					if (ni->mn_flags & F_BIGDATA) {
-						MDB_page *omp;
-						pgno_t pg;
-
-						/* Need writable leaf */
-						if (mp != leaf) {
-							mc.mc_pg[mc.mc_top] = leaf;
-							mdb_page_copy(leaf, mp, my->mc_env->me_psize);
-							mp = leaf;
-							ni = NODEPTR(mp, i);
-						}
-
-						memcpy(&pg, NODEDATA(ni), sizeof(pg));
-						memcpy(NODEDATA(ni), &my->mc_next_pgno, sizeof(pgno_t));
-						rc = mdb_page_get(&mc, pg, &omp, NULL);
-						if (rc)
-							goto done;
-						if (my->mc_wlen[toggle] >= MDB_WBUF) {
-							rc = mdb_env_cthr_toggle(my, 1);
-							if (rc)
-								goto done;
-							toggle = my->mc_toggle;
-						}
-						mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]);
-						memcpy(mo, omp, my->mc_env->me_psize);
-						mo->mp_pgno = my->mc_next_pgno;
-						my->mc_next_pgno += omp->mp_pages;
-						my->mc_wlen[toggle] += my->mc_env->me_psize;
-						if (omp->mp_pages > 1) {
-							my->mc_olen[toggle] = my->mc_env->me_psize * (omp->mp_pages - 1);
-							my->mc_over[toggle] = (char *)omp + my->mc_env->me_psize;
-							rc = mdb_env_cthr_toggle(my, 1);
-							if (rc)
-								goto done;
-							toggle = my->mc_toggle;
-						}
-					} else if (ni->mn_flags & F_SUBDATA) {
-						MDB_db db;
-
-						/* Need writable leaf */
-						if (mp != leaf) {
-							mc.mc_pg[mc.mc_top] = leaf;
-							mdb_page_copy(leaf, mp, my->mc_env->me_psize);
-							mp = leaf;
-							ni = NODEPTR(mp, i);
-						}
-
-						memcpy(&db, NODEDATA(ni), sizeof(db));
-						my->mc_toggle = toggle;
-						rc = mdb_env_cwalk(my, &db.md_root, ni->mn_flags & F_DUPDATA);
-						if (rc)
-							goto done;
-						toggle = my->mc_toggle;
-						memcpy(NODEDATA(ni), &db, sizeof(db));
-					}
-				}
-			}
-		} else {
-			mc.mc_ki[mc.mc_top]++;
-			if (mc.mc_ki[mc.mc_top] < n) {
-				pgno_t pg;
-again:
-				ni = NODEPTR(mp, mc.mc_ki[mc.mc_top]);
-				pg = NODEPGNO(ni);
-				rc = mdb_page_get(&mc, pg, &mp, NULL);
-				if (rc)
-					goto done;
-				mc.mc_top++;
-				mc.mc_snum++;
-				mc.mc_ki[mc.mc_top] = 0;
-				if (IS_BRANCH(mp)) {
-					/* Whenever we advance to a sibling branch page,
-					 * we
must proceed all the way down to its first leaf. - */ - mdb_page_copy(mc.mc_pg[mc.mc_top], mp, my->mc_env->me_psize); - goto again; - } else - mc.mc_pg[mc.mc_top] = mp; - continue; - } - } - if (my->mc_wlen[toggle] >= MDB_WBUF) { - rc = mdb_env_cthr_toggle(my, 1); - if (rc) - goto done; - toggle = my->mc_toggle; - } - mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]); - mdb_page_copy(mo, mp, my->mc_env->me_psize); - mo->mp_pgno = my->mc_next_pgno++; - my->mc_wlen[toggle] += my->mc_env->me_psize; - if (mc.mc_top) { - /* Update parent if there is one */ - ni = NODEPTR(mc.mc_pg[mc.mc_top-1], mc.mc_ki[mc.mc_top-1]); - SETPGNO(ni, mo->mp_pgno); - mdb_cursor_pop(&mc); - } else { - /* Otherwise we're done */ - *pg = mo->mp_pgno; - break; - } - } -done: - free(buf); - return rc; -} - - /** Copy environment with compaction. */ -static int ESECT -mdb_env_copyfd1(MDB_env *env, HANDLE fd) -{ - MDB_meta *mm; - MDB_page *mp; - mdb_copy my = {0}; - MDB_txn *txn = NULL; - pthread_t thr; - pgno_t root, new_root; - int rc = MDB_SUCCESS; - -#ifdef _WIN32 - if (!(my.mc_mutex = CreateMutex(NULL, FALSE, NULL)) || - !(my.mc_cond = CreateEvent(NULL, FALSE, FALSE, NULL))) { - rc = ErrCode(); - goto done; - } - my.mc_wbuf[0] = _aligned_malloc(MDB_WBUF*2, env->me_os_psize); - if (my.mc_wbuf[0] == NULL) { - /* _aligned_malloc() sets errno, but we use Windows error codes */ - rc = ERROR_NOT_ENOUGH_MEMORY; - goto done; - } -#else - if ((rc = pthread_mutex_init(&my.mc_mutex, NULL)) != 0) - return rc; - if ((rc = pthread_cond_init(&my.mc_cond, NULL)) != 0) - goto done2; -#ifdef HAVE_MEMALIGN - my.mc_wbuf[0] = memalign(env->me_os_psize, MDB_WBUF*2); - if (my.mc_wbuf[0] == NULL) { - rc = errno; - goto done; - } -#else - { - void *p; - if ((rc = posix_memalign(&p, env->me_os_psize, MDB_WBUF*2)) != 0) - goto done; - my.mc_wbuf[0] = p; - } -#endif -#endif - memset(my.mc_wbuf[0], 0, MDB_WBUF*2); - my.mc_wbuf[1] = my.mc_wbuf[0] + MDB_WBUF; - my.mc_next_pgno = NUM_METAS; - my.mc_env = env; - my.mc_fd = fd; - rc = THREAD_CREATE(thr, mdb_env_copythr, &my); - if (rc) - goto done; - - rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); - if (rc) - goto finish; - - mp = (MDB_page *)my.mc_wbuf[0]; - memset(mp, 0, NUM_METAS * env->me_psize); - mp->mp_pgno = 0; - mp->mp_flags = P_META; - mm = (MDB_meta *)METADATA(mp); - mdb_env_init_meta0(env, mm); - mm->mm_address = env->me_metas[0]->mm_address; - - mp = (MDB_page *)(my.mc_wbuf[0] + env->me_psize); - mp->mp_pgno = 1; - mp->mp_flags = P_META; - *(MDB_meta *)METADATA(mp) = *mm; - mm = (MDB_meta *)METADATA(mp); - - /* Set metapage 1 with current main DB */ - root = new_root = txn->mt_dbs[MAIN_DBI].md_root; - if (root != P_INVALID) { - /* Count free pages + freeDB pages. Subtract from last_pg - * to find the new last_pg, which also becomes the new root. - */ - MDB_ID freecount = 0; - MDB_cursor mc; - MDB_val key, data; - mdb_cursor_init(&mc, txn, FREE_DBI, NULL); - while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0) - freecount += *(MDB_ID *)data.mv_data; - if (rc != MDB_NOTFOUND) - goto finish; - freecount += txn->mt_dbs[FREE_DBI].md_branch_pages + - txn->mt_dbs[FREE_DBI].md_leaf_pages + - txn->mt_dbs[FREE_DBI].md_overflow_pages; - - new_root = txn->mt_next_pgno - 1 - freecount; - mm->mm_last_pg = new_root; - mm->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; - mm->mm_dbs[MAIN_DBI].md_root = new_root; - } else { - /* When the DB is empty, handle it specially to - * fix any breakage like page leaks from ITS#8174. 
- */ - mm->mm_dbs[MAIN_DBI].md_flags = txn->mt_dbs[MAIN_DBI].md_flags; - } - if (root != P_INVALID || mm->mm_dbs[MAIN_DBI].md_flags) { - mm->mm_txnid = 1; /* use metapage 1 */ - } - - my.mc_wlen[0] = env->me_psize * NUM_METAS; - my.mc_txn = txn; - rc = mdb_env_cwalk(&my, &root, 0); - if (rc == MDB_SUCCESS && root != new_root) { - rc = MDB_INCOMPATIBLE; /* page leak or corrupt DB */ - } - -finish: - if (rc) - my.mc_error = rc; - mdb_env_cthr_toggle(&my, 1 | MDB_EOF); - rc = THREAD_FINISH(thr); - mdb_txn_abort(txn); - -done: -#ifdef _WIN32 - if (my.mc_wbuf[0]) _aligned_free(my.mc_wbuf[0]); - if (my.mc_cond) CloseHandle(my.mc_cond); - if (my.mc_mutex) CloseHandle(my.mc_mutex); -#else - free(my.mc_wbuf[0]); - pthread_cond_destroy(&my.mc_cond); -done2: - pthread_mutex_destroy(&my.mc_mutex); -#endif - return rc ? rc : my.mc_error; -} - - /** Copy environment as-is. */ -static int ESECT -mdb_env_copyfd0(MDB_env *env, HANDLE fd) -{ - MDB_txn *txn = NULL; - mdb_mutexref_t wmutex = NULL; - int rc; - mdb_size_t wsize, w3; - char *ptr; -#ifdef _WIN32 - DWORD len, w2; -#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL) -#else - ssize_t len; - size_t w2; -#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0) -#endif - - /* Do the lock/unlock of the reader mutex before starting the - * write txn. Otherwise other read txns could block writers. - */ - rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); - if (rc) - return rc; - - if (env->me_txns) { - /* We must start the actual read txn after blocking writers */ - mdb_txn_end(txn, MDB_END_RESET_TMP); - - /* Temporarily block writers until we snapshot the meta pages */ - wmutex = env->me_wmutex; - if (LOCK_MUTEX(rc, env, wmutex)) - goto leave; - - rc = mdb_txn_renew0(txn); - if (rc) { - UNLOCK_MUTEX(wmutex); - goto leave; - } - } - - wsize = env->me_psize * NUM_METAS; - ptr = env->me_map; - w2 = wsize; - while (w2 > 0) { - DO_WRITE(rc, fd, ptr, w2, len); - if (!rc) { - rc = ErrCode(); - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - w2 -= len; - continue; - } else { - /* Non-blocking or async handles are not supported */ - rc = EIO; - break; - } - } - if (wmutex) - UNLOCK_MUTEX(wmutex); - - if (rc) - goto leave; - - w3 = txn->mt_next_pgno * env->me_psize; - { - mdb_size_t fsize = 0; - if ((rc = mdb_fsize(env->me_fd, &fsize))) - goto leave; - if (w3 > fsize) - w3 = fsize; - } - wsize = w3 - wsize; - while (wsize > 0) { - if (wsize > MAX_WRITE) - w2 = MAX_WRITE; - else - w2 = wsize; - DO_WRITE(rc, fd, ptr, w2, len); - if (!rc) { - rc = ErrCode(); - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - wsize -= len; - continue; - } else { - rc = EIO; - break; - } - } - -leave: - mdb_txn_abort(txn); - return rc; -} - -int ESECT -mdb_env_copyfd2(MDB_env *env, HANDLE fd, unsigned int flags) -{ - if (flags & MDB_CP_COMPACT) - return mdb_env_copyfd1(env, fd); - else - return mdb_env_copyfd0(env, fd); -} - -int ESECT -mdb_env_copyfd(MDB_env *env, HANDLE fd) -{ - return mdb_env_copyfd2(env, fd, 0); -} - -int ESECT -mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags) -{ - int rc; - MDB_name fname; - HANDLE newfd = INVALID_HANDLE_VALUE; - - rc = mdb_fname_init(path, env->me_flags | MDB_NOLOCK, &fname); - if (rc == MDB_SUCCESS) { - rc = mdb_fopen(env, &fname, MDB_O_COPY, 0666, &newfd); - mdb_fname_destroy(fname); - } - if (rc == MDB_SUCCESS) { - rc = mdb_env_copyfd2(env, newfd, flags); - if (close(newfd) < 0 && rc == MDB_SUCCESS) - rc = ErrCode(); - } - return rc; -} - 
-int ESECT -mdb_env_copy(MDB_env *env, const char *path) -{ - return mdb_env_copy2(env, path, 0); -} - -int ESECT -mdb_env_set_flags(MDB_env *env, unsigned int flag, int onoff) -{ - if (flag & ~CHANGEABLE) - return EINVAL; - if (onoff) - env->me_flags |= flag; - else - env->me_flags &= ~flag; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_flags(MDB_env *env, unsigned int *arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_flags & (CHANGEABLE|CHANGELESS); - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_userctx(MDB_env *env, void *ctx) -{ - if (!env) - return EINVAL; - env->me_userctx = ctx; - return MDB_SUCCESS; -} - -void * ESECT -mdb_env_get_userctx(MDB_env *env) -{ - return env ? env->me_userctx : NULL; -} - -int ESECT -mdb_env_set_assert(MDB_env *env, MDB_assert_func *func) -{ - if (!env) - return EINVAL; -#ifndef NDEBUG - env->me_assert_func = func; -#endif - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_path(MDB_env *env, const char **arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_path; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_fd; - return MDB_SUCCESS; -} - -/** Common code for #mdb_stat() and #mdb_env_stat(). - * @param[in] env the environment to operate in. - * @param[in] db the #MDB_db record containing the stats to return. - * @param[out] arg the address of an #MDB_stat structure to receive the stats. - * @return 0, this function always succeeds. - */ -static int ESECT -mdb_stat0(MDB_env *env, MDB_db *db, MDB_stat *arg) -{ - arg->ms_psize = env->me_psize; - arg->ms_depth = db->md_depth; - arg->ms_branch_pages = db->md_branch_pages; - arg->ms_leaf_pages = db->md_leaf_pages; - arg->ms_overflow_pages = db->md_overflow_pages; - arg->ms_entries = db->md_entries; - - return MDB_SUCCESS; -} - -int ESECT -mdb_env_stat(MDB_env *env, MDB_stat *arg) -{ - MDB_meta *meta; - - if (env == NULL || arg == NULL) - return EINVAL; - - meta = mdb_env_pick_meta(env); - - return mdb_stat0(env, &meta->mm_dbs[MAIN_DBI], arg); -} - -int ESECT -mdb_env_info(MDB_env *env, MDB_envinfo *arg) -{ - MDB_meta *meta; - - if (env == NULL || arg == NULL) - return EINVAL; - - meta = mdb_env_pick_meta(env); - arg->me_mapaddr = meta->mm_address; - arg->me_last_pgno = meta->mm_last_pg; - arg->me_last_txnid = meta->mm_txnid; - - arg->me_mapsize = env->me_mapsize; - arg->me_maxreaders = env->me_maxreaders; - arg->me_numreaders = env->me_txns ? env->me_txns->mti_numreaders : 0; - return MDB_SUCCESS; -} - -/** Set the default comparison functions for a database. - * Called immediately after a database is opened to set the defaults. - * The user can then override them with #mdb_set_compare() or - * #mdb_set_dupsort(). - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - */ -static void -mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi) -{ - uint16_t f = txn->mt_dbs[dbi].md_flags; - - txn->mt_dbxs[dbi].md_cmp = - (f & MDB_REVERSEKEY) ? mdb_cmp_memnr : - (f & MDB_INTEGERKEY) ? mdb_cmp_cint : mdb_cmp_memn; - - txn->mt_dbxs[dbi].md_dcmp = - !(f & MDB_DUPSORT) ? 0 : - ((f & MDB_INTEGERDUP) - ? ((f & MDB_DUPFIXED) ? mdb_cmp_int : mdb_cmp_cint) - : ((f & MDB_REVERSEDUP) ? 
mdb_cmp_memnr : mdb_cmp_memn)); -} - -int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi) -{ - MDB_val key, data; - MDB_dbi i; - MDB_cursor mc; - MDB_db dummy; - int rc, dbflag, exact; - unsigned int unused = 0, seq; - char *namedup; - size_t len; - - if (flags & ~VALID_FLAGS) - return EINVAL; - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - /* main DB? */ - if (!name) { - *dbi = MAIN_DBI; - if (flags & PERSISTENT_FLAGS) { - uint16_t f2 = flags & PERSISTENT_FLAGS; - /* make sure flag changes get committed */ - if ((txn->mt_dbs[MAIN_DBI].md_flags | f2) != txn->mt_dbs[MAIN_DBI].md_flags) { - txn->mt_dbs[MAIN_DBI].md_flags |= f2; - txn->mt_flags |= MDB_TXN_DIRTY; - } - } - mdb_default_cmp(txn, MAIN_DBI); - return MDB_SUCCESS; - } - - if (txn->mt_dbxs[MAIN_DBI].md_cmp == NULL) { - mdb_default_cmp(txn, MAIN_DBI); - } - - /* Is the DB already open? */ - len = strlen(name); - for (i=CORE_DBS; i<txn->mt_numdbs; i++) { - if (!txn->mt_dbxs[i].md_name.mv_size) { - /* Remember this free slot */ - if (!unused) unused = i; - continue; - } - if (len == txn->mt_dbxs[i].md_name.mv_size && - !strncmp(name, txn->mt_dbxs[i].md_name.mv_data, len)) { - *dbi = i; - return MDB_SUCCESS; - } - } - - /* If no free slot and max hit, fail */ - if (!unused && txn->mt_numdbs >= txn->mt_env->me_maxdbs) - return MDB_DBS_FULL; - - /* Cannot mix named databases with some mainDB flags */ - if (txn->mt_dbs[MAIN_DBI].md_flags & (MDB_DUPSORT|MDB_INTEGERKEY)) - return (flags & MDB_CREATE) ? MDB_INCOMPATIBLE : MDB_NOTFOUND; - - /* Find the DB info */ - dbflag = DB_NEW|DB_VALID|DB_USRVALID; - exact = 0; - key.mv_size = len; - key.mv_data = (void *)name; - mdb_cursor_init(&mc, txn, MAIN_DBI, NULL); - rc = mdb_cursor_set(&mc, &key, &data, MDB_SET, &exact); - if (rc == MDB_SUCCESS) { - /* make sure this is actually a DB */ - MDB_node *node = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]); - if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA) - return MDB_INCOMPATIBLE; - } else { - if (rc != MDB_NOTFOUND || !(flags & MDB_CREATE)) - return rc; - if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) - return EACCES; - } - - /* Done here so we cannot fail after creating a new DB */ - if ((namedup = strdup(name)) == NULL) - return ENOMEM; - - if (rc) { - /* MDB_NOTFOUND and MDB_CREATE: Create new DB */ - data.mv_size = sizeof(MDB_db); - data.mv_data = &dummy; - memset(&dummy, 0, sizeof(dummy)); - dummy.md_root = P_INVALID; - dummy.md_flags = flags & PERSISTENT_FLAGS; - WITH_CURSOR_TRACKING(mc, - rc = mdb_cursor_put(&mc, &key, &data, F_SUBDATA)); - dbflag |= DB_DIRTY; - } - - if (rc) { - free(namedup); - } else { - /* Got info, register DBI in this txn */ - unsigned int slot = unused ?
unused : txn->mt_numdbs; - txn->mt_dbxs[slot].md_name.mv_data = namedup; - txn->mt_dbxs[slot].md_name.mv_size = len; - txn->mt_dbxs[slot].md_rel = NULL; - txn->mt_dbflags[slot] = dbflag; - /* txn-> and env-> are the same in read txns, use - * tmp variable to avoid undefined assignment - */ - seq = ++txn->mt_env->me_dbiseqs[slot]; - txn->mt_dbiseqs[slot] = seq; - - memcpy(&txn->mt_dbs[slot], data.mv_data, sizeof(MDB_db)); - *dbi = slot; - mdb_default_cmp(txn, slot); - if (!unused) { - txn->mt_numdbs++; - } - } - - return rc; -} - -int ESECT -mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *arg) -{ - if (!arg || !TXN_DBI_EXIST(txn, dbi, DB_VALID)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - if (txn->mt_dbflags[dbi] & DB_STALE) { - MDB_cursor mc; - MDB_xcursor mx; - /* Stale, must read the DB's root. cursor_init does it for us. */ - mdb_cursor_init(&mc, txn, dbi, &mx); - } - return mdb_stat0(txn->mt_env, &txn->mt_dbs[dbi], arg); -} - -void mdb_dbi_close(MDB_env *env, MDB_dbi dbi) -{ - char *ptr; - if (dbi < CORE_DBS || dbi >= env->me_maxdbs) - return; - ptr = env->me_dbxs[dbi].md_name.mv_data; - /* If there was no name, this was already closed */ - if (ptr) { - env->me_dbxs[dbi].md_name.mv_data = NULL; - env->me_dbxs[dbi].md_name.mv_size = 0; - env->me_dbflags[dbi] = 0; - env->me_dbiseqs[dbi]++; - free(ptr); - } -} - -int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags) -{ - /* We could return the flags for the FREE_DBI too but what's the point? */ - if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - *flags = txn->mt_dbs[dbi].md_flags & PERSISTENT_FLAGS; - return MDB_SUCCESS; -} - -/** Add all the DB's pages to the free list. - * @param[in] mc Cursor on the DB to free. - * @param[in] subs non-Zero to check for sub-DBs in this DB. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_drop0(MDB_cursor *mc, int subs) -{ - int rc; - - rc = mdb_page_search(mc, NULL, MDB_PS_FIRST); - if (rc == MDB_SUCCESS) { - MDB_txn *txn = mc->mc_txn; - MDB_node *ni; - MDB_cursor mx; - unsigned int i; - - /* DUPSORT sub-DBs have no ovpages/DBs. Omit scanning leaves. - * This also avoids any P_LEAF2 pages, which have no nodes. - * Also if the DB doesn't have sub-DBs and has no overflow - * pages, omit scanning leaves. 
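 * (Concretely: for a C_SUB cursor, or a DB with no sub-DBs and no
 * overflow pages, the code below pops the cursor straight to the
 * branch level, so leaf pages are freed without being scanned node
 * by node.)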
- */ - if ((mc->mc_flags & C_SUB) || - (!subs && !mc->mc_db->md_overflow_pages)) - mdb_cursor_pop(mc); - - mdb_cursor_copy(mc, &mx); -#ifdef MDB_VL32 - /* bump refcount for mx's pages */ - for (i=0; i<mc->mc_snum; i++) - mdb_page_get(&mx, mc->mc_pg[i]->mp_pgno, &mx.mc_pg[i], NULL); -#endif - while (mc->mc_snum > 0) { - MDB_page *mp = mc->mc_pg[mc->mc_top]; - unsigned n = NUMKEYS(mp); - if (IS_LEAF(mp)) { - for (i=0; i<n; i++) { - ni = NODEPTR(mp, i); - if (ni->mn_flags & F_BIGDATA) { - MDB_page *omp; - pgno_t pg; - memcpy(&pg, NODEDATA(ni), sizeof(pg)); - rc = mdb_page_get(mc, pg, &omp, NULL); - if (rc != 0) - goto done; - mdb_cassert(mc, IS_OVERFLOW(omp)); - rc = mdb_midl_append_range(&txn->mt_free_pgs, - pg, omp->mp_pages); - if (rc) - goto done; - mc->mc_db->md_overflow_pages -= omp->mp_pages; - if (!mc->mc_db->md_overflow_pages && !subs) - break; - } else if (subs && (ni->mn_flags & F_SUBDATA)) { - mdb_xcursor_init1(mc, ni); - rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0); - if (rc) - goto done; - } - } - if (!subs && !mc->mc_db->md_overflow_pages) - goto pop; - } else { - if ((rc = mdb_midl_need(&txn->mt_free_pgs, n)) != 0) - goto done; - for (i=0; i<n; i++) { - pgno_t pg; - ni = NODEPTR(mp, i); - pg = NODEPGNO(ni); - mdb_midl_xappend(txn->mt_free_pgs, pg); - } - } - if (!mc->mc_top) - break; - mc->mc_ki[mc->mc_top] = i; - rc = mdb_cursor_sibling(mc, 1); - if (rc) { - if (rc != MDB_NOTFOUND) - goto done; - /* no more siblings, go back to beginning - * of previous level. - */ -pop: - mdb_cursor_pop(mc); - mc->mc_ki[0] = 0; - for (i=1; i<mc->mc_snum; i++) { - mc->mc_ki[i] = 0; - mc->mc_pg[i] = mx.mc_pg[i]; - } - } - } - /* free it */ - rc = mdb_midl_append(&txn->mt_free_pgs, mc->mc_db->md_root); -done: - if (rc) - txn->mt_flags |= MDB_TXN_ERROR; - /* drop refcount for mx's pages */ - MDB_CURSOR_UNREF(&mx, 0); - } else if (rc == MDB_NOTFOUND) { - rc = MDB_SUCCESS; - } - mc->mc_flags &= ~C_INITIALIZED; - return rc; -} - -int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del) -{ - MDB_cursor *mc, *m2; - int rc; - - if ((unsigned)del > 1 || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) - return EACCES; - - if (TXN_DBI_CHANGED(txn, dbi)) - return MDB_BAD_DBI; - - rc = mdb_cursor_open(txn, dbi, &mc); - if (rc) - return rc; - - rc = mdb_drop0(mc, mc->mc_db->md_flags & MDB_DUPSORT); - /* Invalidate the dropped DB's cursors */ - for (m2 = txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) - m2->mc_flags &= ~(C_INITIALIZED|C_EOF); - if (rc) - goto leave; - - /* Can't delete the main DB */ - if (del && dbi >= CORE_DBS) { - rc = mdb_del0(txn, MAIN_DBI, &mc->mc_dbx->md_name, NULL, F_SUBDATA); - if (!rc) { - txn->mt_dbflags[dbi] = DB_STALE; - mdb_dbi_close(txn->mt_env, dbi); - } else { - txn->mt_flags |= MDB_TXN_ERROR; - } - } else { - /* reset the DB record, mark it dirty */ - txn->mt_dbflags[dbi] |= DB_DIRTY; - txn->mt_dbs[dbi].md_depth = 0; - txn->mt_dbs[dbi].md_branch_pages = 0; - txn->mt_dbs[dbi].md_leaf_pages = 0; - txn->mt_dbs[dbi].md_overflow_pages = 0; - txn->mt_dbs[dbi].md_entries = 0; - txn->mt_dbs[dbi].md_root = P_INVALID; - - txn->mt_flags |= MDB_TXN_DIRTY; - } -leave: - mdb_cursor_close(mc); - return rc; -} - -int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp) -{ - if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - txn->mt_dbxs[dbi].md_cmp = cmp; - return MDB_SUCCESS; -} - -int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp) -{ - if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - txn->mt_dbxs[dbi].md_dcmp = cmp; - return MDB_SUCCESS; -} - -int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel) -{ - if
(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - txn->mt_dbxs[dbi].md_rel = rel; - return MDB_SUCCESS; -} - -int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx) -{ - if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - txn->mt_dbxs[dbi].md_relctx = ctx; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_maxkeysize(MDB_env *env) -{ - return ENV_MAXKEY(env); -} - -int ESECT -mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx) -{ - unsigned int i, rdrs; - MDB_reader *mr; - char buf[64]; - int rc = 0, first = 1; - - if (!env || !func) - return -1; - if (!env->me_txns) { - return func("(no reader locks)\n", ctx); - } - rdrs = env->me_txns->mti_numreaders; - mr = env->me_txns->mti_readers; - for (i=0; i> 1; - cursor = base + pivot + 1; - val = pid - ids[cursor]; - - if( val < 0 ) { - n = pivot; - - } else if ( val > 0 ) { - base = cursor; - n -= pivot + 1; - - } else { - /* found, so it's a duplicate */ - return -1; - } - } - - if( val > 0 ) { - ++cursor; - } - ids[0]++; - for (n = ids[0]; n > cursor; n--) - ids[n] = ids[n-1]; - ids[n] = pid; - return 0; -} - -int ESECT -mdb_reader_check(MDB_env *env, int *dead) -{ - if (!env) - return EINVAL; - if (dead) - *dead = 0; - return env->me_txns ? mdb_reader_check0(env, 0, dead) : MDB_SUCCESS; -} - -/** As #mdb_reader_check(). \b rlocked is set if caller locked #me_rmutex. */ -static int ESECT -mdb_reader_check0(MDB_env *env, int rlocked, int *dead) -{ - mdb_mutexref_t rmutex = rlocked ? NULL : env->me_rmutex; - unsigned int i, j, rdrs; - MDB_reader *mr; - MDB_PID_T *pids, pid; - int rc = MDB_SUCCESS, count = 0; - - rdrs = env->me_txns->mti_numreaders; - pids = malloc((rdrs+1) * sizeof(MDB_PID_T)); - if (!pids) - return ENOMEM; - pids[0] = 0; - mr = env->me_txns->mti_readers; - for (i=0; ime_pid) { - if (mdb_pid_insert(pids, pid) == 0) { - if (!mdb_reader_pid(env, Pidcheck, pid)) { - /* Stale reader found */ - j = i; - if (rmutex) { - if ((rc = LOCK_MUTEX0(rmutex)) != 0) { - if ((rc = mdb_mutex_failed(env, rmutex, rc))) - break; - rdrs = 0; /* the above checked all readers */ - } else { - /* Recheck, a new process may have reused pid */ - if (mdb_reader_pid(env, Pidcheck, pid)) - j = rdrs; - } - } - for (; jme_rmutex); - if (!rlocked) { - /* Keep mti_txnid updated, otherwise next writer can - * overwrite data which latest meta page refers to. - */ - meta = mdb_env_pick_meta(env); - env->me_txns->mti_txnid = meta->mm_txnid; - /* env is hosed if the dead thread was ours */ - if (env->me_txn) { - env->me_flags |= MDB_FATAL_ERROR; - env->me_txn = NULL; - rc = MDB_PANIC; - } - } - DPRINTF(("%cmutex owner died, %s", (rlocked ? 'r' : 'w'), - (rc ? 
"this process' env is hosed" : "recovering"))); - rc2 = mdb_reader_check0(env, rlocked, NULL); - if (rc2 == 0) - rc2 = mdb_mutex_consistent(mutex); - if (rc || (rc = rc2)) { - DPRINTF(("LOCK_MUTEX recovery failed, %s", mdb_strerror(rc))); - UNLOCK_MUTEX(mutex); - } - } else { -#ifdef _WIN32 - rc = ErrCode(); -#endif - DPRINTF(("LOCK_MUTEX failed, %s", mdb_strerror(rc))); - } - - return rc; -} -#endif /* MDB_ROBUST_SUPPORTED */ - -#if defined(_WIN32) -/** Convert \b src to new wchar_t[] string with room for \b xtra extra chars */ -static int ESECT -utf8_to_utf16(const char *src, MDB_name *dst, int xtra) -{ - int rc, need = 0; - wchar_t *result = NULL; - for (;;) { /* malloc result, then fill it in */ - need = MultiByteToWideChar(CP_UTF8, 0, src, -1, result, need); - if (!need) { - rc = ErrCode(); - free(result); - return rc; - } - if (!result) { - result = malloc(sizeof(wchar_t) * (need + xtra)); - if (!result) - return ENOMEM; - continue; - } - dst->mn_alloced = 1; - dst->mn_len = need - 1; - dst->mn_val = result; - return MDB_SUCCESS; - } -} -#endif /* defined(_WIN32) */ -/** @} */ diff --git a/vendors/ocaml-lmdb/src/midl.c b/vendors/ocaml-lmdb/src/midl.c deleted file mode 100644 index 341021cc5fb8..000000000000 --- a/vendors/ocaml-lmdb/src/midl.c +++ /dev/null @@ -1,421 +0,0 @@ -/** @file midl.c - * @brief ldap bdb back-end ID List functions */ -/* $OpenLDAP$ */ -/* This work is part of OpenLDAP Software . - * - * Copyright 2000-2018 The OpenLDAP Foundation. - * Portions Copyright 2001-2018 Howard Chu, Symas Corp. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * . - */ - -#include -#include -#include -#include -#include -#include "midl.h" - -/** @defgroup internal LMDB Internals - * @{ - */ -/** @defgroup idls ID List Management - * @{ - */ -#define CMP(x,y) ( (x) < (y) ? 
-1 : (x) > (y) ) - -unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ) -{ - /* - * binary search of id in ids - * if found, returns position of id - * if not found, returns first position greater than id - */ - unsigned base = 0; - unsigned cursor = 1; - int val = 0; - unsigned n = ids[0]; - - while( 0 < n ) { - unsigned pivot = n >> 1; - cursor = base + pivot + 1; - val = CMP( ids[cursor], id ); - - if( val < 0 ) { - n = pivot; - - } else if ( val > 0 ) { - base = cursor; - n -= pivot + 1; - - } else { - return cursor; - } - } - - if( val > 0 ) { - ++cursor; - } - return cursor; -} - -#if 0 /* superseded by append/sort */ -int mdb_midl_insert( MDB_IDL ids, MDB_ID id ) -{ - unsigned x, i; - - x = mdb_midl_search( ids, id ); - assert( x > 0 ); - - if( x < 1 ) { - /* internal error */ - return -2; - } - - if ( x <= ids[0] && ids[x] == id ) { - /* duplicate */ - assert(0); - return -1; - } - - if ( ++ids[0] >= MDB_IDL_DB_MAX ) { - /* no room */ - --ids[0]; - return -2; - - } else { - /* insert id */ - for (i=ids[0]; i>x; i--) - ids[i] = ids[i-1]; - ids[x] = id; - } - - return 0; -} -#endif - -MDB_IDL mdb_midl_alloc(int num) -{ - MDB_IDL ids = malloc((num+2) * sizeof(MDB_ID)); - if (ids) { - *ids++ = num; - *ids = 0; - } - return ids; -} - -void mdb_midl_free(MDB_IDL ids) -{ - if (ids) - free(ids-1); -} - -void mdb_midl_shrink( MDB_IDL *idp ) -{ - MDB_IDL ids = *idp; - if (*(--ids) > MDB_IDL_UM_MAX && - (ids = realloc(ids, (MDB_IDL_UM_MAX+2) * sizeof(MDB_ID)))) - { - *ids++ = MDB_IDL_UM_MAX; - *idp = ids; - } -} - -static int mdb_midl_grow( MDB_IDL *idp, int num ) -{ - MDB_IDL idn = *idp-1; - /* grow it */ - idn = realloc(idn, (*idn + num + 2) * sizeof(MDB_ID)); - if (!idn) - return ENOMEM; - *idn++ += num; - *idp = idn; - return 0; -} - -int mdb_midl_need( MDB_IDL *idp, unsigned num ) -{ - MDB_IDL ids = *idp; - num += ids[0]; - if (num > ids[-1]) { - num = (num + num/4 + (256 + 2)) & -256; - if (!(ids = realloc(ids-1, num * sizeof(MDB_ID)))) - return ENOMEM; - *ids++ = num - 2; - *idp = ids; - } - return 0; -} - -int mdb_midl_append( MDB_IDL *idp, MDB_ID id ) -{ - MDB_IDL ids = *idp; - /* Too big? */ - if (ids[0] >= ids[-1]) { - if (mdb_midl_grow(idp, MDB_IDL_UM_MAX)) - return ENOMEM; - ids = *idp; - } - ids[0]++; - ids[ids[0]] = id; - return 0; -} - -int mdb_midl_append_list( MDB_IDL *idp, MDB_IDL app ) -{ - MDB_IDL ids = *idp; - /* Too big? */ - if (ids[0] + app[0] >= ids[-1]) { - if (mdb_midl_grow(idp, app[0])) - return ENOMEM; - ids = *idp; - } - memcpy(&ids[ids[0]+1], &app[1], app[0] * sizeof(MDB_ID)); - ids[0] += app[0]; - return 0; -} - -int mdb_midl_append_range( MDB_IDL *idp, MDB_ID id, unsigned n ) -{ - MDB_ID *ids = *idp, len = ids[0]; - /* Too big? 
*/ - if (len + n > ids[-1]) { - if (mdb_midl_grow(idp, n | MDB_IDL_UM_MAX)) - return ENOMEM; - ids = *idp; - } - ids[0] = len + n; - ids += len; - while (n) - ids[n--] = id++; - return 0; -} - -void mdb_midl_xmerge( MDB_IDL idl, MDB_IDL merge ) -{ - MDB_ID old_id, merge_id, i = merge[0], j = idl[0], k = i+j, total = k; - idl[0] = (MDB_ID)-1; /* delimiter for idl scan below */ - old_id = idl[j]; - while (i) { - merge_id = merge[i--]; - for (; old_id < merge_id; old_id = idl[--j]) - idl[k--] = old_id; - idl[k--] = merge_id; - } - idl[0] = total; -} - -/* Quicksort + Insertion sort for small arrays */ - -#define SMALL 8 -#define MIDL_SWAP(a,b) { itmp=(a); (a)=(b); (b)=itmp; } - -void -mdb_midl_sort( MDB_IDL ids ) -{ - /* Max possible depth of int-indexed tree * 2 items/level */ - int istack[sizeof(int)*CHAR_BIT * 2]; - int i,j,k,l,ir,jstack; - MDB_ID a, itmp; - - ir = (int)ids[0]; - l = 1; - jstack = 0; - for(;;) { - if (ir - l < SMALL) { /* Insertion sort */ - for (j=l+1;j<=ir;j++) { - a = ids[j]; - for (i=j-1;i>=1;i--) { - if (ids[i] >= a) break; - ids[i+1] = ids[i]; - } - ids[i+1] = a; - } - if (jstack == 0) break; - ir = istack[jstack--]; - l = istack[jstack--]; - } else { - k = (l + ir) >> 1; /* Choose median of left, center, right */ - MIDL_SWAP(ids[k], ids[l+1]); - if (ids[l] < ids[ir]) { - MIDL_SWAP(ids[l], ids[ir]); - } - if (ids[l+1] < ids[ir]) { - MIDL_SWAP(ids[l+1], ids[ir]); - } - if (ids[l] < ids[l+1]) { - MIDL_SWAP(ids[l], ids[l+1]); - } - i = l+1; - j = ir; - a = ids[l+1]; - for(;;) { - do i++; while(ids[i] > a); - do j--; while(ids[j] < a); - if (j < i) break; - MIDL_SWAP(ids[i],ids[j]); - } - ids[l+1] = ids[j]; - ids[j] = a; - jstack += 2; - if (ir-i+1 >= j-l) { - istack[jstack] = ir; - istack[jstack-1] = i; - ir = j-1; - } else { - istack[jstack] = j-1; - istack[jstack-1] = l; - l = i; - } - } - } -} - -unsigned mdb_mid2l_search( MDB_ID2L ids, MDB_ID id ) -{ - /* - * binary search of id in ids - * if found, returns position of id - * if not found, returns first position greater than id - */ - unsigned base = 0; - unsigned cursor = 1; - int val = 0; - unsigned n = (unsigned)ids[0].mid; - - while( 0 < n ) { - unsigned pivot = n >> 1; - cursor = base + pivot + 1; - val = CMP( id, ids[cursor].mid ); - - if( val < 0 ) { - n = pivot; - - } else if ( val > 0 ) { - base = cursor; - n -= pivot + 1; - - } else { - return cursor; - } - } - - if( val > 0 ) { - ++cursor; - } - return cursor; -} - -int mdb_mid2l_insert( MDB_ID2L ids, MDB_ID2 *id ) -{ - unsigned x, i; - - x = mdb_mid2l_search( ids, id->mid ); - - if( x < 1 ) { - /* internal error */ - return -2; - } - - if ( x <= ids[0].mid && ids[x].mid == id->mid ) { - /* duplicate */ - return -1; - } - - if ( ids[0].mid >= MDB_IDL_UM_MAX ) { - /* too big */ - return -2; - - } else { - /* insert id */ - ids[0].mid++; - for (i=(unsigned)ids[0].mid; i>x; i--) - ids[i] = ids[i-1]; - ids[x] = *id; - } - - return 0; -} - -int mdb_mid2l_append( MDB_ID2L ids, MDB_ID2 *id ) -{ - /* Too big? 
*/ - if (ids[0].mid >= MDB_IDL_UM_MAX) { - return -2; - } - ids[0].mid++; - ids[ids[0].mid] = *id; - return 0; -} - -#ifdef MDB_VL32 -unsigned mdb_mid3l_search( MDB_ID3L ids, MDB_ID id ) -{ - /* - * binary search of id in ids - * if found, returns position of id - * if not found, returns first position greater than id - */ - unsigned base = 0; - unsigned cursor = 1; - int val = 0; - unsigned n = (unsigned)ids[0].mid; - - while( 0 < n ) { - unsigned pivot = n >> 1; - cursor = base + pivot + 1; - val = CMP( id, ids[cursor].mid ); - - if( val < 0 ) { - n = pivot; - - } else if ( val > 0 ) { - base = cursor; - n -= pivot + 1; - - } else { - return cursor; - } - } - - if( val > 0 ) { - ++cursor; - } - return cursor; -} - -int mdb_mid3l_insert( MDB_ID3L ids, MDB_ID3 *id ) -{ - unsigned x, i; - - x = mdb_mid3l_search( ids, id->mid ); - - if( x < 1 ) { - /* internal error */ - return -2; - } - - if ( x <= ids[0].mid && ids[x].mid == id->mid ) { - /* duplicate */ - return -1; - } - - /* insert id */ - ids[0].mid++; - for (i=(unsigned)ids[0].mid; i>x; i--) - ids[i] = ids[i-1]; - ids[x] = *id; - - return 0; -} -#endif /* MDB_VL32 */ - -/** @} */ -/** @} */ diff --git a/vendors/ocaml-lmdb/src/midl.h b/vendors/ocaml-lmdb/src/midl.h deleted file mode 100644 index a0d5727cf0cd..000000000000 --- a/vendors/ocaml-lmdb/src/midl.h +++ /dev/null @@ -1,204 +0,0 @@ -/** @file midl.h - * @brief LMDB ID List header file. - * - * This file was originally part of back-bdb but has been - * modified for use in libmdb. Most of the macros defined - * in this file are unused, just left over from the original. - * - * This file is only used internally in libmdb and its definitions - * are not exposed publicly. - */ -/* $OpenLDAP$ */ -/* This work is part of OpenLDAP Software <http://www.openldap.org/>. - * - * Copyright 2000-2018 The OpenLDAP Foundation. - * Portions Copyright 2001-2018 Howard Chu, Symas Corp. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * <http://www.OpenLDAP.org/license.html>. - */ - -#ifndef _MDB_MIDL_H_ -#define _MDB_MIDL_H_ - -#include "lmdb.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** @defgroup internal LMDB Internals - * @{ - */ - -/** @defgroup idls ID List Management - * @{ - */ - /** A generic unsigned ID number. These were entryIDs in back-bdb. - * Preferably it should have the same size as a pointer. - */ -typedef mdb_size_t MDB_ID; - - /** An IDL is an ID List, a sorted array of IDs. The first - * element of the array is a counter for how many actual - * IDs are in the list. In the original back-bdb code, IDLs are - * sorted in ascending order. For libmdb IDLs are sorted in - * descending order.
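 * Concretely, ids[-1] holds the allocated capacity and ids[0] the
 * current count, so the usable entries are ids[1..ids[0]]; see
 * mdb_midl_alloc() above, which hides the capacity word behind the
 * returned pointer.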
- */ -typedef MDB_ID *MDB_IDL; - -/* IDL sizes - likely should be even bigger - * limiting factors: sizeof(ID), thread stack size - */ -#ifdef MDB_VL32 -#define MDB_IDL_LOGN 14 /* DB_SIZE is 2^14, UM_SIZE is 2^15 */ -#else -#define MDB_IDL_LOGN 16 /* DB_SIZE is 2^16, UM_SIZE is 2^17 */ -#endif -#define MDB_IDL_DB_SIZE (1< invalid_arg "assert_error" - | Error e -> if e <> err then invalid_arg "assert_error" - -let assert_equal_ba expected ba = - assert (expected = Cstruct.(to_string (of_bigarray ba))) - -let version () = - let { major ; minor ; patch } = version () in - assert (major = 0) ; - assert (minor = 9) ; - assert (patch = 70) - -let test_string_of_error () = - let errmsg = string_of_error KeyExist in - assert (String.length errmsg > 0) - -let tmpdir = Filename.get_temp_dir_name () - -let cleanup () = - let files = [ - Filename.concat tmpdir "data.mdb" ; - Filename.concat tmpdir "lock.mdb" - ] in - ListLabels.iter files ~f:begin fun fn -> - Sys.(if file_exists fn then remove fn) - end - -let env () = - cleanup () ; - opendir ~maxreaders:34 ~maxdbs:1 tmpdir 0o644 >>= fun env -> - let _stat = stat env in - let _envinfo = envinfo env in - let _flags = get_flags env in - let _path = get_path env in - let _fd = get_fd env in - let _maxreaders = get_maxreaders env in - let _maxkeysize = get_maxkeysize env in - sync env >>= fun () -> - Ok () - -let txn () = - cleanup () ; - opendir ~maxdbs:1 tmpdir 0o644 >>= fun env -> - create_ro_txn env >>= fun rotxn -> - reset_ro_txn rotxn ; - create_rw_txn env >>= fun rwtxn -> - assert (rwtxn = rwtxn) ; - let env2 = get_txn_env rwtxn in - assert (env = env2) ; - opendb rwtxn >>= fun _ -> - opendb ~flags:[Create] rwtxn ~name:"bleh" >>= fun dbi -> - put_string rwtxn dbi "test" "test" >>= fun () -> - get rwtxn dbi "test" >>= fun buffer -> - assert_equal_ba "test" buffer ; - assert_error KeyNotFound (del rwtxn dbi "bleh") ; - del rwtxn dbi "test" >>= fun () -> - db_stat rwtxn dbi >>= fun _stat -> - db_flags rwtxn dbi >>= fun _flags -> - db_drop rwtxn dbi >>= fun () -> - closedir env ; - Ok () - -let cursors () = - cleanup () ; - opendir tmpdir 0o644 >>= fun env -> - create_rw_txn env >>= fun txn -> - opendb txn >>= fun db -> - opencursor txn db >>= fun cursor -> - assert_error KeyNotFound (cursor_first cursor) ; - assert_error KeyNotFound (cursor_last cursor) ; - cursor_put_string cursor "test" "test" >>= fun () -> - cursor_put_string cursor "test2" "test2" >>= fun () -> - sync env >>= fun () -> - cursor_first cursor >>= fun () -> - cursor_at cursor "" >>= fun () -> - assert_error KeyNotFound (cursor_prev cursor) ; - cursor_last cursor >>= fun () -> - assert_error KeyNotFound (cursor_next cursor) ; - cursor_prev cursor >>= fun () -> - get txn db "test" >>= fun buf -> - assert_equal_ba "test" buf ; - cursor_get cursor >>= fun (k, v) -> - assert_equal_ba "test" k ; - assert_equal_ba "test" v ; - closedir env ; - Ok () - -let cursors_del () = - cleanup () ; - opendir tmpdir 0o644 >>= fun env -> - with_rw_db env ~f:begin fun txn db -> - with_cursor txn db ~f:begin fun cursor -> - cursor_put_string cursor "k1" "v1" >>= fun () -> - cursor_first cursor >>= fun () -> - cursor_fold_left cursor ~init:() ~f:begin fun _acc (_k, _v) -> - cursor_del cursor - end >>= fun () -> - assert_error KeyNotFound (cursor_first cursor) ; - Ok () - end - end - -let cursors_del4 () = - cleanup () ; - opendir tmpdir 0o644 >>= fun env -> - with_rw_db env ~f:begin fun txn db -> - with_cursor txn db ~f:begin fun cursor -> - cursor_put_string cursor "k1" "v1" >>= fun () -> - 
cursor_put_string cursor "k2" "v2" >>= fun () -> - cursor_put_string cursor "k3" "v3" >>= fun () -> - cursor_put_string cursor "k4" "v4" >>= fun () -> - cursor_first cursor >>= fun () -> - cursor_fold_left cursor ~init:() ~f:begin fun _acc (_k, _v) -> - cursor_del cursor - end >>= fun () -> - assert_error KeyNotFound (cursor_first cursor) ; - Ok () - end - end - -let fold () = - cleanup () ; - opendir tmpdir 0o644 >>= fun env -> - with_rw_db env ~f:begin fun txn db -> - opencursor txn db >>= fun cursor -> - cursor_put_string cursor "k1" "v1" >>= fun () -> - cursor_put_string cursor "k2" "v2" >>= fun () -> - cursor_put_string cursor "k3" "v3" >>= fun () -> - cursor_put_string cursor "k4" "v4" >>= fun () -> - cursor_first cursor >>= fun () -> - cursor_fold_left ~f:begin fun i (k, v) -> - assert_equal_ba ("k" ^ (string_of_int i)) k ; - assert_equal_ba ("v" ^ (string_of_int i)) v ; - Ok (succ i) - end ~init:1 cursor >>= fun _ -> - Ok () - end >>= fun () -> - closedir env ; - Ok () - -let consistency () = - cleanup () ; - opendir tmpdir 0o644 >>= fun env -> - let v = Cstruct.(to_bigarray (of_string "bleh")) in - with_rw_db env ~f:begin fun txn db -> - put txn db "bleh" v - end >>= fun () -> - with_ro_db env ~f:begin fun txn db -> - get txn db "bleh" >>= fun v' -> - (* assert (v = v') ; *) - assert_equal_ba "bleh" v' ; - Ok () - end >>= fun () -> - Ok () - -let fail_on_error f () = - match f () with - | Ok _ -> () - | Error err -> failwith (string_of_error err) - -let basic = [ - "version", `Quick, version ; - "string_of_error", `Quick, test_string_of_error ; - "env", `Quick, fail_on_error env ; - "txn", `Quick, fail_on_error txn ; - "cursors", `Quick, fail_on_error cursors ; - "cursors_del", `Quick, fail_on_error cursors_del ; - "cursors_del4", `Quick, fail_on_error cursors_del4 ; - "fold", `Quick, fail_on_error fold ; - "consistency", `Quick, fail_on_error consistency ; -] - -let () = - Alcotest.run "lmdb" [ - "basic", basic ; - ] diff --git a/vendors/ocaml-lmdb/tezos-lmdb.opam b/vendors/ocaml-lmdb/tezos-lmdb.opam deleted file mode 100644 index 4060f0a0f8f2..000000000000 --- a/vendors/ocaml-lmdb/tezos-lmdb.opam +++ /dev/null @@ -1,20 +0,0 @@ -opam-version: "2.0" -name: "tezos-lmdb" -version: "0.1" -authors: "Vincent Bernardoff " -maintainer: "Vincent Bernardoff " -license: "ISC" -synopsis: "Simple OCaml binding to Lightning Memory-Mapped Database from Symas" -homepage: "https://github.com/vbmithr/ocaml-lmdb" -bug-reports: "https://github.com/vbmithr/ocaml-lmdb/issues" -dev-repo: "git://github.com/vbmithr/ocaml-lmdb" -build: [ - ["dune" "build" "-j" jobs "-p" name "@install"] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "dune" {build & >= "1.7"} - "rresult" {>= "0.5.0"} - "cstruct" {with-test & >= "3.2.1"} - "alcotest" {with-test & >= "0.8.1"} -] -- GitLab From aa5d84d288adbeb1b14a425adbcc8e18120fcbee Mon Sep 17 00:00:00 2001 From: Victor Allombert Date: Fri, 10 Dec 2021 11:48:51 +0100 Subject: [PATCH 5/6] Tests/Python: update test-results.xml --- tests_python/test-results.xml | 12126 ++++++++++++++++---------------- 1 file changed, 6029 insertions(+), 6097 deletions(-) diff --git a/tests_python/test-results.xml b/tests_python/test-results.xml index d002c79ec25a..36249f23292c 100644 --- a/tests_python/test-results.xml +++ b/tests_python/test-results.xml @@ -1,6127 +1,6059 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /home/tezos/.venv/lib/python3.9/site-packages/_pytest/skipping.py:245: Bug in annotation system - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /home/tezos/.venv/lib/python3.9/site-packages/_pytest/skipping.py:245: Bug in annotation system - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /home/tezos/.venv/lib/python3.9/site-packages/_pytest/skipping.py:245: Bug in annotation system - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
[...]
/builds/tezos/tezos/tests_python/tests_010/test_tls.py:22: requires to install a custom CA
[...]
/builds/tezos/tezos/tests_python/tests_011/test_tls.py:22: requires to install a custom CA
[...]
/builds/tezos/tezos/tests_python/tests_alpha/test_tls.py:22: requires to install a custom CA
[...]
--
GitLab


From c2669cc1171b40647cd129a110da3d266b2ec87d Mon Sep 17 00:00:00 2001
From: Victor Allombert
Date: Fri, 10 Dec 2021 10:45:29 +0100
Subject: [PATCH 6/6] Changelog: remove the support of the legacy store

---
 CHANGES.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/CHANGES.rst b/CHANGES.rst
index 2aed0edd8f1f..0dc62119d99d 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -121,6 +121,11 @@ Node
   `multiple transfers` command to learn more. In addition, operations
   rejected because of this limitation are solely delayed to a future block.
 
+- Removed support for store versions 0.0.4 (used by Octez 9.7) or below.
+  It is no longer possible to run ``tezos-node upgrade storage`` to upgrade
+  from those older versions. It is also no longer possible to import
+  snapshots that were exported using this version.
+
 Client
 ------
 
--
GitLab
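
For context, a minimal sketch of the migration path the changelog entry above
implies, assuming the behaviour of contemporary Octez releases: a data
directory still on store version 0.0.4 or below would first be upgraded with
an older binary that still ships the legacy store, or be replaced by importing
a freshly exported snapshot. Only the ``tezos-node`` subcommands come from the
changelog; the Python wrapper, function names, and paths below are
illustrative assumptions, not part of this patch series.

# Illustrative sketch only; not part of these patches.
# "upgrade storage" and "snapshot import" are real tezos-node subcommands;
# the wrapper functions and binary/path arguments are hypothetical.
import subprocess

def migrate_old_data_dir(old_node_binary: str, data_dir: str) -> None:
    # Assumed to be run with a release that still contains the legacy
    # store (e.g. an Octez 10.x binary), before updating past this change.
    subprocess.run(
        [old_node_binary, "upgrade", "storage", "--data-dir", data_dir],
        check=True,
    )

def bootstrap_from_fresh_snapshot(
    node_binary: str, snapshot_file: str, data_dir: str
) -> None:
    # Alternative path: discard the old store entirely and import a
    # snapshot exported by a post-legacy version of the node.
    subprocess.run(
        [node_binary, "snapshot", "import", snapshot_file,
         "--data-dir", data_dir],
        check=True,
    )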