diff --git a/manifest/main.ml b/manifest/main.ml index e0fd02bd9b0cd68738c8111ce89f9226164d2b9a..650a76d44c9b8ee261ef8cc2d6f00fe6863fa49f 100644 --- a/manifest/main.ml +++ b/manifest/main.ml @@ -3183,14 +3183,30 @@ let octez_layer2_store = ~synopsis:"Tezos: layer2 storage utils" ~deps: [ + index; octez_base |> open_ ~m:"TzPervasives"; irmin_pack; irmin_pack_unix; irmin; + aches_lwt; + octez_stdlib_unix |> open_; octez_context_encoding; ] ~linkall:true +let _octez_layer2_indexed_store_test = + test + "test_indexed_store" + ~path:"src/lib_layer2_store/test/" + ~opam:"tezos-layer2-store" + ~deps: + [ + octez_error_monad |> open_ |> open_ ~m:"TzLwtreslib"; + octez_layer2_store |> open_; + qcheck_alcotest; + alcotest_lwt; + ] + let octez_dal_node_services = private_lib "tezos_dal_node_services" diff --git a/opam/tezos-layer2-store.opam b/opam/tezos-layer2-store.opam index a2c30c9ad19a694d7bf0868bd0ef13d4385e5db2..870cd410cbe6cd3757b27a7cc5e15a02c0c1db48 100644 --- a/opam/tezos-layer2-store.opam +++ b/opam/tezos-layer2-store.opam @@ -9,10 +9,16 @@ dev-repo: "git+https://gitlab.com/tezos/tezos.git" license: "MIT" depends: [ "dune" { >= "3.0" } + "index" { >= "1.6.0" & < "1.7.0" } "tezos-base" "irmin-pack" { >= "3.5.1" & < "3.6.0" } "irmin" { >= "3.5.1" & < "3.6.0" } + "aches-lwt" { >= "1.0.0" } + "tezos-stdlib-unix" "tezos-context" + "tezos-error-monad" {with-test} + "qcheck-alcotest" { with-test & >= "0.20" } + "alcotest-lwt" { with-test & >= "1.5.0" } ] build: [ ["rm" "-r" "vendors"] diff --git a/src/lib_layer2_store/dune b/src/lib_layer2_store/dune index 301e0e2414539921aba7df084c490d97833c4507..70f9934f1492189e85f75365e2eeeac5cba1a84a 100644 --- a/src/lib_layer2_store/dune +++ b/src/lib_layer2_store/dune @@ -6,12 +6,16 @@ (package tezos-layer2-store) (instrumentation (backend bisect_ppx)) (libraries + index tezos-base irmin-pack irmin-pack.unix irmin + aches-lwt + tezos-stdlib-unix tezos-context.encoding) (library_flags (:standard -linkall)) (flags (:standard) - -open 
Tezos_base.TzPervasives)) + -open Tezos_base.TzPervasives + -open Tezos_stdlib_unix)) diff --git a/src/lib_layer2_store/indexed_store.ml b/src/lib_layer2_store/indexed_store.ml new file mode 100644 index 0000000000000000000000000000000000000000..e530dee7b80e6f3e8752dd27583c2e6d85d3a53c --- /dev/null +++ b/src/lib_layer2_store/indexed_store.ml @@ -0,0 +1,657 @@ +(*****************************************************************************) +(* *) +(* Open Source License *) +(* Copyright (c) 2023 Nomadic Labs, *) +(* *) +(* Permission is hereby granted, free of charge, to any person obtaining a *) +(* copy of this software and associated documentation files (the "Software"),*) +(* to deal in the Software without restriction, including without limitation *) +(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) +(* and/or sell copies of the Software, and to permit persons to whom the *) +(* Software is furnished to do so, subject to the following conditions: *) +(* *) +(* The above copyright notice and this permission notice shall be included *) +(* in all copies or substantial portions of the Software. *) +(* *) +(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) +(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) +(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) +(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) +(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) +(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) +(* DEALINGS IN THE SOFTWARE. 
*) +(* *) +(*****************************************************************************) + +open Store_sigs + +type error += + | Cannot_load_store of string * string + | Cannot_write_to_store of string + | Cannot_read_from_store of string + | Decoding_error of Data_encoding.Binary.read_error + +let () = + register_error_kind + ~id:"layer2_store.cannot_load_store" + ~title:"Store cannot be loaded" + ~description:"Store cannot be loaded." + ~pp:(fun ppf (name, path) -> + Format.fprintf ppf "Store %s cannot be loaded from %s." name path) + `Permanent + Data_encoding.(obj2 (req "name" string) (req "path" string)) + (function Cannot_load_store (n, p) -> Some (n, p) | _ -> None) + (fun (n, p) -> Cannot_load_store (n, p)) + +let () = + register_error_kind + ~id:"layer2_store.cannot_write_to_store" + ~title:"Value cannot be written to store" + ~description:"Value cannot be written to store." + ~pp:(fun ppf name -> + Format.fprintf ppf "Value cannot be written to store %s." name) + `Permanent + Data_encoding.(obj1 (req "name" string)) + (function Cannot_write_to_store n -> Some n | _ -> None) + (fun n -> Cannot_write_to_store n) + +let () = + register_error_kind + ~id:"layer2_store.cannot_read_from_store" + ~title:"Value cannot be read from store" + ~description:"Value cannot be read from store." + ~pp:(fun ppf name -> + Format.fprintf ppf "Value cannot be read from store %s." 
name) + `Permanent + Data_encoding.(obj1 (req "name" string)) + (function Cannot_read_from_store n -> Some n | _ -> None) + (fun n -> Cannot_read_from_store n) + +let () = + register_error_kind + ~id:"layer2_store.decoding_error" + ~title:"Cannot decode file" + ~description:"A file for a persistent element could not be decoded" + ~pp:(fun ppf error -> + Format.fprintf + ppf + "Decoding error: %a" + Data_encoding.Json.pp + (Data_encoding.Json.construct + Data_encoding.Binary.read_error_encoding + error)) + `Permanent + Data_encoding.(obj1 (req "error" Data_encoding.Binary.read_error_encoding)) + (function Decoding_error e -> Some e | _ -> None) + (fun e -> Decoding_error e) + +(* Helper functions to copy byte sequences or integers in [src] to another byte + sequence [dst] at offset [offset], with named arguments to avoid + confusion. These functions return the offset in the destination at which to + copy the more data. *) + +let blit ~src ~dst offset = + let len = Bytes.length src in + Bytes.blit src 0 dst offset len ; + offset + len + +let bytes_set_int64 ~src ~dst offset = + Bytes.set_int64_be dst offset src ; + offset + 8 + +let bytes_set_int8 ~src ~dst offset = + Bytes.set_int8 dst offset src ; + offset + 1 + +(* Helper functions to read data (strings with a decoding function, or integers) + from a binary string. These functions return, as the second component, the + offset in the string at which to read more data. 
*) + +let read_int64 str offset = + let i = TzEndian.get_int64_string str offset in + (i, offset + 8) + +let read_int8 str offset = + let i = TzEndian.get_int8_string str offset in + (i, offset + 1) + +(* Functors to build stores on indexes *) + +module type NAME = sig + val name : string +end + +module type SINGLETON_STORE = sig + type +'a t + + type value + + val load : path:string -> 'a mode -> 'a t tzresult Lwt.t + + val read : [> `Read] t -> value option tzresult Lwt.t + + val write : [> `Write] t -> value -> unit tzresult Lwt.t + + val delete : [> `Write] t -> unit tzresult Lwt.t + + val readonly : [> `Read] t -> [`Read] t +end + +module type INDEXABLE_STORE = sig + type +'a t + + type key + + type value + + val load : path:string -> 'a mode -> 'a t tzresult Lwt.t + + val mem : [> `Read] t -> key -> bool tzresult Lwt.t + + val find : [> `Read] t -> key -> value option tzresult Lwt.t + + val add : ?flush:bool -> [> `Write] t -> key -> value -> unit tzresult Lwt.t + + val close : _ t -> unit tzresult Lwt.t + + val readonly : [> `Read] t -> [`Read] t +end + +module type INDEXABLE_REMOVABLE_STORE = sig + include INDEXABLE_STORE + + val remove : ?flush:bool -> [> `Write] t -> key -> unit tzresult Lwt.t +end + +module type INDEXED_FILE = sig + type +'a t + + type key + + type header + + type value + + val mem : [> `Read] t -> key -> bool tzresult Lwt.t + + val header : [> `Read] t -> key -> header option tzresult Lwt.t + + val read : [> `Read] t -> key -> (value * header) option tzresult Lwt.t + + val append : + ?flush:bool -> + [> `Write] t -> + key:key -> + header:header -> + value:value -> + unit tzresult Lwt.t + + val load : path:string -> cache_size:int -> 'a mode -> 'a t tzresult Lwt.t + + val close : _ t -> unit tzresult Lwt.t + + val readonly : [> `Read] t -> [`Read] t +end + +module type SIMPLE_INDEXED_FILE = sig + include INDEXED_FILE + + val append : + ?flush:bool -> [> `Write] t -> key:key -> value:value -> unit tzresult Lwt.t +end + +module type 
ENCODABLE_VALUE = sig + type t + + val name : string + + val encoding : t Data_encoding.t +end + +module type FIXED_ENCODABLE_VALUE = sig + include ENCODABLE_VALUE + + val fixed_size : int +end + +module type ENCODABLE_VALUE_HEADER = sig + include ENCODABLE_VALUE + + module Header : FIXED_ENCODABLE_VALUE +end + +module Make_fixed_encodable (V : ENCODABLE_VALUE) : + FIXED_ENCODABLE_VALUE with type t = V.t = struct + include V + + let fixed_size = + match Data_encoding.Binary.fixed_length encoding with + | None -> Stdlib.invalid_arg (name ^ " encoding is not fixed size") + | Some size -> size +end + +module Make_index_value (E : FIXED_ENCODABLE_VALUE) : + Index.Value.S with type t = E.t = struct + type t = E.t + + let encoded_size = E.fixed_size + + let encode v = + Data_encoding.Binary.to_string_exn ~buffer_size:encoded_size E.encoding v + + let decode buf offset = + let _read_bytes, v = + Data_encoding.Binary.read_exn E.encoding buf offset encoded_size + in + v + + (* The {!Repr.t} value is only used for pretty printing in {!Index} so this is + fine. *) + let t = Repr.map Repr.string (fun s -> decode s 0) encode +end + +module Make_index_key (E : sig + include FIXED_ENCODABLE_VALUE + + val equal : t -> t -> bool +end) : Index.Key.S with type t = E.t = struct + include Make_index_value (E) + + let equal = E.equal + + let hash v = Stdlib.Hashtbl.hash (encode v) + + (* {!Stdlib.Hashtbl.hash} is 30 bits *) + let hash_size = 30 (* in bits *) +end + +module Make_indexable (N : NAME) (K : Index.Key.S) (V : Index.Value.S) = struct + module I = Index_unix.Make (K) (V) (Index.Cache.Unbounded) + + type _ t = {index : I.t; scheduler : Lwt_idle_waiter.t} + + (* TODO: https://gitlab.com/tezos/tezos/-/issues/4654 + Make log size constant configurable. 
*) + let log_size = 10_000 + + let mem store k = + let open Lwt_result_syntax in + trace (Cannot_read_from_store N.name) + @@ protect + @@ fun () -> + Lwt_idle_waiter.task store.scheduler @@ fun () -> + return (I.mem store.index k) + + let find store k = + let open Lwt_result_syntax in + trace (Cannot_read_from_store N.name) + @@ protect + @@ fun () -> + Lwt_idle_waiter.task store.scheduler @@ fun () -> + let v = try Some (I.find store.index k) with Not_found -> None in + return v + + let add ?(flush = true) store k v = + let open Lwt_result_syntax in + trace (Cannot_write_to_store N.name) + @@ protect + @@ fun () -> + Lwt_idle_waiter.force_idle store.scheduler @@ fun () -> + I.replace store.index k v ; + if flush then I.flush store.index ; + return_unit + + let load (type a) ~path (mode : a mode) : a t tzresult Lwt.t = + let open Lwt_result_syntax in + trace (Cannot_load_store (N.name, path)) + @@ protect + @@ fun () -> + let*! () = Lwt_utils_unix.create_dir (Filename.dirname path) in + let readonly = match mode with Read_only -> true | Read_write -> false in + let index = I.v ~log_size ~readonly path in + let scheduler = Lwt_idle_waiter.create () in + return {index; scheduler} + + let close store = + let open Lwt_result_syntax in + protect @@ fun () -> + Lwt_idle_waiter.force_idle store.scheduler @@ fun () -> + (try I.close store.index with Index.Closed -> ()) ; + return_unit + + let readonly x = (x :> [`Read] t) +end + +module Make_indexable_removable (N : NAME) (K : Index.Key.S) (V : Index.Value.S) = +struct + module V_opt = struct + (* The values stored in the index are optional values. When we "remove" a + key from the store, we're not really removing it from the index, but + simply setting its association to [None] (encoded with zero bytes here). 
+ *) + + type t = V.t option + + let t = Repr.option V.t + + let encoded_size = 1 + V.encoded_size + + let encode v = + let dst = Bytes.create encoded_size in + let tag, value_bytes = + match v with + | None -> (0, Bytes.make V.encoded_size '\000') + | Some v -> (1, V.encode v |> Bytes.unsafe_of_string) + in + let offset = bytes_set_int8 ~dst ~src:tag 0 in + let _ = blit ~src:value_bytes ~dst offset in + Bytes.unsafe_to_string dst + + let decode str offset = + let tag, offset = read_int8 str offset in + match tag with + | 0 -> None + | 1 -> + let value = V.decode str offset in + Some value + | _ -> assert false + end + + include Make_indexable (N) (K) (V_opt) + + let find store k = + let open Lwt_result_syntax in + let+ v = find store k in + match v with None | Some None -> None | Some (Some v) -> Some v + + let mem store hash = + let open Lwt_result_syntax in + let+ b = find store hash in + Option.is_some b + + let add ?flush store k v = add ?flush store k (Some v) + + let remove ?(flush = true) store k = + let open Lwt_result_syntax in + trace (Cannot_write_to_store N.name) + @@ protect + @@ fun () -> + Lwt_idle_waiter.force_idle store.scheduler @@ fun () -> + let exists = I.mem store.index k in + if not exists then return_unit + else ( + I.replace store.index k None ; + if flush then I.flush store.index ; + return_unit) +end + +module Make_singleton (S : sig + type t + + val name : string + + val encoding : t Data_encoding.t +end) : SINGLETON_STORE with type value := S.t = struct + type +'a t = {file : string; mutable cache : S.t option option} + + let read_disk store = + let open Lwt_result_syntax in + trace (Cannot_read_from_store S.name) + @@ protect + @@ fun () -> + let*! exists = Lwt_unix.file_exists store.file in + match exists with + | false -> return_none + | true -> ( + Lwt_io.with_file + ~flags:[Unix.O_RDONLY; O_CLOEXEC] + ~mode:Input + store.file + @@ fun channel -> + let*! 
raw_data = Lwt_io.read channel in + let data = Data_encoding.Binary.of_string S.encoding raw_data in + match data with + | Ok data -> return_some data + | Error err -> tzfail (Decoding_error err)) + + let read store = + let open Lwt_result_syntax in + match store.cache with Some v -> return v | None -> read_disk store + + let write_disk store x = + let open Lwt_result_syntax in + trace (Cannot_write_to_store S.name) + @@ let*! res = + Lwt_utils_unix.with_atomic_open_out ~overwrite:true store.file + @@ fun fd -> + let block_bytes = Data_encoding.Binary.to_bytes_exn S.encoding x in + Lwt_utils_unix.write_bytes fd block_bytes + in + Result.bind_error res Lwt_utils_unix.tzfail_of_io_error |> Lwt.return + + let write store x = + let open Lwt_result_syntax in + let+ () = write_disk store x in + store.cache <- Some (Some x) + + let delete_disk store = + let open Lwt_result_syntax in + trace (Cannot_write_to_store S.name) + @@ protect + @@ fun () -> + let*! exists = Lwt_unix.file_exists store.file in + match exists with + | false -> return_unit + | true -> + let*! () = Lwt_unix.unlink store.file in + return_unit + + let delete store = + let open Lwt_result_syntax in + let+ () = delete_disk store in + store.cache <- Some None + + let load ~path _mode = + let open Lwt_result_syntax in + trace (Cannot_load_store (S.name, path)) + @@ protect + @@ fun () -> + let*! 
() = Lwt_utils_unix.create_dir (Filename.dirname path) in + return {file = path; cache = None} + + let readonly x = (x :> [`Read] t) +end + +module Make_indexed_file + (N : NAME) + (K : Index.Key.S) + (V : ENCODABLE_VALUE_HEADER) = +struct + module Cache = + Aches_lwt.Lache.Make_option (Aches.Rache.Transfer (Aches.Rache.LRU) (K)) + module Raw_header = Make_index_value (V.Header) + + module IHeader = struct + let name = N.name ^ ".header" + + type t = {offset : int; header : V.Header.t} + + let encoded_size = 8 (* offset *) + Raw_header.encoded_size + + let t = + let open Repr in + map + (pair int Raw_header.t) + (fun (offset, header) -> {offset; header}) + (fun {offset; header} -> (offset, header)) + + let encode v = + let dst = Bytes.create encoded_size in + let offset = bytes_set_int64 ~src:(Int64.of_int v.offset) ~dst 0 in + let _offset = + blit ~src:(Raw_header.encode v.header |> String.to_bytes) ~dst offset + in + Bytes.unsafe_to_string dst + + let decode str offset = + let file_offset, offset = read_int64 str offset in + let header = Raw_header.decode str offset in + {offset = Int64.to_int file_offset; header} + end + + module Header_index = Index_unix.Make (K) (IHeader) (Index.Cache.Unbounded) + + module Values_file = struct + let encoding = Data_encoding.dynamic_size ~kind:`Uint30 V.encoding + + let pread_value fd ~file_offset = + let open Lwt_result_syntax in + trace (Cannot_read_from_store N.name) + @@ protect + @@ fun () -> + (* Read length *) + let length_bytes = Bytes.create 4 in + let*! () = + Lwt_utils_unix.read_bytes ~file_offset ~pos:0 ~len:4 fd length_bytes + in + let value_length_int32 = Bytes.get_int32_be length_bytes 0 in + let value_length = Int32.to_int value_length_int32 in + let value_bytes = Bytes.extend length_bytes 0 value_length in + let*! 
() = + Lwt_utils_unix.read_bytes + ~file_offset:(file_offset + 4) + ~pos:4 + ~len:value_length + fd + value_bytes + in + match Data_encoding.Binary.of_bytes encoding value_bytes with + | Ok value -> return (value, 4 + value_length) + | Error err -> tzfail (Decoding_error err) + end + + type +'a t = { + index : Header_index.t; + fd : Lwt_unix.file_descr; + scheduler : Lwt_idle_waiter.t; + cache : (V.t * V.Header.t) tzresult Cache.t; + } + + (* The log_size corresponds to the maximum size of the memory zone + allocated in memory before flushing it onto the disk. It is + basically a cache which is use for the index. The cache size is + `log_size * log_entry` where a `log_entry` is roughly 56 bytes. *) + (* TODO: https://gitlab.com/tezos/tezos/-/issues/4654 + Make log size constant configurable. *) + let blocks_log_size = 10_000 + + let mem store key = + let open Lwt_result_syntax in + trace (Cannot_read_from_store IHeader.name) + @@ protect + @@ fun () -> + Lwt_idle_waiter.task store.scheduler @@ fun () -> + return (Header_index.mem store.index key) + + let header store key = + let open Lwt_result_syntax in + trace (Cannot_read_from_store IHeader.name) + @@ protect + @@ fun () -> + Lwt_idle_waiter.task store.scheduler @@ fun () -> + try + let {IHeader.header; _} = Header_index.find store.index key in + return_some header + with Not_found -> return_none + + let read store key = + Lwt_idle_waiter.task store.scheduler @@ fun () -> + let read_from_disk key = + let open Lwt_syntax in + match Header_index.find store.index key with + | exception Not_found -> Lwt.return_none + | {IHeader.offset; header} -> + let+ value = Values_file.pread_value store.fd ~file_offset:offset in + Some (Result.map (fun (value, _ofs) -> (value, header)) value) + in + let open Lwt_result_syntax in + Cache.bind_or_put store.cache key read_from_disk @@ function + | None -> return_none + | Some (Ok value) -> return_some value + | Some (Error _ as e) -> Lwt.return e + + let locked_write_value store 
~offset ~value ~key ~header = + trace (Cannot_write_to_store N.name) + @@ protect + @@ fun () -> + let open Lwt_result_syntax in + let value_bytes = + Data_encoding.Binary.to_bytes_exn Values_file.encoding value + in + let value_length = Bytes.length value_bytes in + let*! () = + Lwt_utils_unix.write_bytes ~pos:0 ~len:value_length store.fd value_bytes + in + Header_index.replace store.index key {offset; header} ; + return value_length + + let append ?(flush = true) store ~key ~header ~(value : V.t) = + trace (Cannot_write_to_store N.name) + @@ protect + @@ fun () -> + let open Lwt_result_syntax in + Lwt_idle_waiter.force_idle store.scheduler @@ fun () -> + Cache.put store.cache key (Lwt.return_some (Ok (value, header))) ; + let*! offset = Lwt_unix.lseek store.fd 0 Unix.SEEK_END in + let*! _written_len = locked_write_value store ~offset ~value ~key ~header in + if flush then Header_index.flush store.index ; + return_unit + + let load (type a) ~path ~cache_size (mode : a mode) : a t tzresult Lwt.t = + let open Lwt_result_syntax in + trace (Cannot_load_store (N.name, path)) + @@ protect + @@ fun () -> + let*! () = Lwt_utils_unix.create_dir path in + let readonly = match mode with Read_only -> true | Read_write -> false in + let flag, perms = + if readonly then (Unix.O_RDONLY, 0o444) else (Unix.O_RDWR, 0o644) + in + let*! 
fd = + Lwt_unix.openfile + (Filename.concat path "data") + [Unix.O_CREAT; O_CLOEXEC; flag] + perms + in + let index = + Header_index.v + ~log_size:blocks_log_size + ~readonly + (Filename.concat path "index") + in + let scheduler = Lwt_idle_waiter.create () in + let cache = Cache.create cache_size in + return {index; fd; scheduler; cache} + + let close store = + protect @@ fun () -> + Lwt_idle_waiter.force_idle store.scheduler @@ fun () -> + (try Header_index.close store.index with Index.Closed -> ()) ; + Lwt_utils_unix.safe_close store.fd + + let readonly x = (x :> [`Read] t) +end + +module Make_simple_indexed_file + (N : NAME) + (K : Index.Key.S) (V : sig + include ENCODABLE_VALUE_HEADER + + val header : t -> Header.t + end) = +struct + include Make_indexed_file (N) (K) (V) + + let append ?flush store ~key ~value = + append ?flush store ~key ~value ~header:(V.header value) +end diff --git a/src/lib_layer2_store/indexed_store.mli b/src/lib_layer2_store/indexed_store.mli new file mode 100644 index 0000000000000000000000000000000000000000..d8e168b1b6fcb0275e11a32fb304dc36780bd2cc --- /dev/null +++ b/src/lib_layer2_store/indexed_store.mli @@ -0,0 +1,245 @@ +(*****************************************************************************) +(* *) +(* Open Source License *) +(* Copyright (c) 2023 Nomadic Labs, *) +(* *) +(* Permission is hereby granted, free of charge, to any person obtaining a *) +(* copy of this software and associated documentation files (the "Software"),*) +(* to deal in the Software without restriction, including without limitation *) +(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) +(* and/or sell copies of the Software, and to permit persons to whom the *) +(* Software is furnished to do so, subject to the following conditions: *) +(* *) +(* The above copyright notice and this permission notice shall be included *) +(* in all copies or substantial portions of the Software. 
*) +(* *) +(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) +(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) +(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) +(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) +(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) +(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) +(* DEALINGS IN THE SOFTWARE. *) +(* *) +(*****************************************************************************) + +(** This library provides functors to build various kinds of stores using + mirage's {{:https://github.com/mirage/index} index}} and Octez + {{:https://gitlab.com/nomadic-labs/data-encoding} data-encoding} + libraries. + + It is tailored to build stores for the Layer 2 nodes of Tezos (Tx-rollups + and Smart rollups). + + The stores built with this library support concurrent accesses thanks to the + use of the light scheduler provided by {!Lwt_idle_waiter} for exclusive + write access. *) + +open Store_sigs + +(** {2 Signatures} *) + +(** A store for single updatable values. Values are stored in a file on disk and + are kept in memory in a cache. *) +module type SINGLETON_STORE = sig + (** The type of the singleton store. *) + type +'a t + + (** The type of values stored in this singleton store. *) + type value + + (** Load (or initializes) a singleton store in the file [path]. *) + val load : path:string -> 'a mode -> 'a t tzresult Lwt.t + + (** Reads the current value from the disk. Returns [None] if the + file does not exist. *) + val read : [> `Read] t -> value option tzresult Lwt.t + + (** Write the value to disk. *) + val write : [> `Write] t -> value -> unit tzresult Lwt.t + + (** Deletes the value from the disk. *) + val delete : [> `Write] t -> unit tzresult Lwt.t + + (** [readonly t] returns a read only version of the store [t]. 
*) + val readonly : [> `Read] t -> [`Read] t +end + +(** An index store mapping keys to values. It uses an index file internally. *) +module type INDEXABLE_STORE = sig + (** The type of store built on indexes. *) + type +'a t + + (** The type of keys for the store. *) + type key + + (** The type of values stored in the index, *) + type value + + (** Load (or initializes) a store in the file [path]. If [readonly] is [true], + the store will only be accessed in read only mode. *) + val load : path:string -> 'a mode -> 'a t tzresult Lwt.t + + (** Returns [true] if the key has a value associated in + the store. *) + val mem : [> `Read] t -> key -> bool tzresult Lwt.t + + (** Returns the value associated to a key in the store, + or [None] otherwise. *) + val find : [> `Read] t -> key -> value option tzresult Lwt.t + + (** Add an association from a key to a value in the + store. If [flush] (default to [true]) is set, the index is written on disk + right away. *) + val add : ?flush:bool -> [> `Write] t -> key -> value -> unit tzresult Lwt.t + + (** Closes the store. After this call the store cannot be accessed anymore + (unless one calls {!load} again). *) + val close : _ t -> unit tzresult Lwt.t + + (** [readonly t] returns a read only version of the store [t]. *) + val readonly : [> `Read] t -> [`Read] t +end + +(** An index store mapping keys to values. Keys are associated to optional + values in the index which allows them to be removed. *) +module type INDEXABLE_REMOVABLE_STORE = sig + include INDEXABLE_STORE + + (** Removes an association from the store. Does nothing if the key was not + registered. *) + val remove : ?flush:bool -> [> `Write] t -> key -> unit tzresult Lwt.t +end + +(** An indexed file (i.e. a file and an index) mapping keys to values. Contrary + to {!INDEXABLE_STORE}, the values can vary in size. Internally, values are + stored, concatenated, in a append only file. 
The index file associates keys + to offsets in this file (and a header to retrieve information more + efficiently). +*) +module type INDEXED_FILE = sig + (** The type of indexed file store. *) + type +'a t + + (** The type of keys for the store. *) + type key + + (** The type of headers stored in the index. The header can contain fixed size + information that can be accessed more efficiently than the full value. *) + type header + + (** The type of values stored in the file. *) + type value + + (** Returns [true] if the key has a value associated in + the store. *) + val mem : [> `Read] t -> key -> bool tzresult Lwt.t + + (** Returns the header for a key if it exists in the store. *) + val header : [> `Read] t -> key -> header option tzresult Lwt.t + + (** Read a full value and header from the indexed file store. *) + val read : [> `Read] t -> key -> (value * header) option tzresult Lwt.t + + (** Append a new binding to the indexed file store. *) + val append : + ?flush:bool -> + [> `Write] t -> + key:key -> + header:header -> + value:value -> + unit tzresult Lwt.t + + (** Loads a new or existing indexed file store in the directory [path]. *) + val load : path:string -> cache_size:int -> 'a mode -> 'a t tzresult Lwt.t + + (** Close the index and the file. One must call {!load} again to read or write + data in the store. *) + val close : _ t -> unit tzresult Lwt.t + + (** [readonly t] returns a read only version of the store [t]. *) + val readonly : [> `Read] t -> [`Read] t +end + +(** Same as {!INDEXED_FILE} but where headers are extracted from values. *) +module type SIMPLE_INDEXED_FILE = sig + include INDEXED_FILE + + (** Append a new binding to the indexed file store. *) + val append : + ?flush:bool -> [> `Write] t -> key:key -> value:value -> unit tzresult Lwt.t +end + +(** Names for stores. *) +module type NAME = sig + val name : string +end + +(** Values that can be encoded. 
*) +module type ENCODABLE_VALUE = sig + type t + + val name : string + + val encoding : t Data_encoding.t +end + +(** Values that can be encoded and whose encoding is a fixed size. *) +module type FIXED_ENCODABLE_VALUE = sig + include ENCODABLE_VALUE + + val fixed_size : int +end + +(** Values with a given fixed size header. *) +module type ENCODABLE_VALUE_HEADER = sig + include ENCODABLE_VALUE + + module Header : FIXED_ENCODABLE_VALUE +end + +(** {2 Functors} *) + +module Make_singleton (S : ENCODABLE_VALUE) : + SINGLETON_STORE with type value := S.t + +module Make_indexable (_ : NAME) (K : Index.Key.S) (V : Index.Value.S) : + INDEXABLE_STORE with type key := K.t and type value := V.t + +module Make_indexable_removable (_ : NAME) (K : Index.Key.S) (V : Index.Value.S) : + INDEXABLE_REMOVABLE_STORE with type key := K.t and type value := V.t + +module Make_indexed_file + (_ : NAME) + (K : Index.Key.S) + (V : ENCODABLE_VALUE_HEADER) : + INDEXED_FILE + with type key := K.t + and type value := V.t + and type header := V.Header.t + +module Make_simple_indexed_file + (_ : NAME) + (K : Index.Key.S) (V : sig + include ENCODABLE_VALUE_HEADER + + val header : t -> Header.t + end) : + SIMPLE_INDEXED_FILE + with type key := K.t + and type value := V.t + and type header := V.Header.t + +(** {2 Helper functors} *) + +module Make_fixed_encodable (V : ENCODABLE_VALUE) : + FIXED_ENCODABLE_VALUE with type t = V.t + +module Make_index_value (E : FIXED_ENCODABLE_VALUE) : + Index.Value.S with type t = E.t + +module Make_index_key (E : sig + include FIXED_ENCODABLE_VALUE + + val equal : t -> t -> bool +end) : Index.Key.S with type t = E.t diff --git a/src/lib_layer2_store/test/dune b/src/lib_layer2_store/test/dune new file mode 100644 index 0000000000000000000000000000000000000000..06dd4ff8daab51ac9f9f5578bc9997eb37c99688 --- /dev/null +++ b/src/lib_layer2_store/test/dune @@ -0,0 +1,20 @@ +; This file was automatically generated, do not edit. +; Edit file manifest/main.ml instead. 
+ +(executable + (name test_indexed_store) + (libraries + tezos-error-monad + tezos_layer2_store + qcheck-alcotest + alcotest-lwt) + (flags + (:standard) + -open Tezos_error_monad + -open Tezos_error_monad.TzLwtreslib + -open Tezos_layer2_store)) + +(rule + (alias runtest) + (package tezos-layer2-store) + (action (run %{dep:./test_indexed_store.exe}))) diff --git a/src/lib_layer2_store/test/test_indexed_store.ml b/src/lib_layer2_store/test/test_indexed_store.ml new file mode 100644 index 0000000000000000000000000000000000000000..2846417609f9ee5c326dc73b8fe92820d8306875 --- /dev/null +++ b/src/lib_layer2_store/test/test_indexed_store.ml @@ -0,0 +1,602 @@ +(*****************************************************************************) +(* *) +(* Open Source License *) +(* Copyright (c) 2023 Nomadic Labs, *) +(* *) +(* Permission is hereby granted, free of charge, to any person obtaining a *) +(* copy of this software and associated documentation files (the "Software"),*) +(* to deal in the Software without restriction, including without limitation *) +(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *) +(* and/or sell copies of the Software, and to permit persons to whom the *) +(* Software is furnished to do so, subject to the following conditions: *) +(* *) +(* The above copyright notice and this permission notice shall be included *) +(* in all copies or substantial portions of the Software. *) +(* *) +(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*) +(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *) +(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *) +(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*) +(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *) +(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *) +(* DEALINGS IN THE SOFTWARE. 
*) +(* *) +(*****************************************************************************) + +(* Testing + ------- + Component: Layer-2 indexed store + Invocation: dune exec src/lib_layer2_store/test/test_indexed_store.exe -- -v + Subject: Test the indexed store +*) + +(** The properties checked are: + - no errors + - read value is the same as the last written one + - resulting store agrees with equivalent hash table + - reloading values from disk agrees with version with cache +*) + +open Error_monad +open Store_sigs +open Indexed_store + +(** Signature for type equipped with a generator and a pretty printing + function. *) +module type GENERATABLE = sig + type t + + val gen : t QCheck2.Gen.t + + val pp : Format.formatter -> t -> unit +end + +(** Keys as strings of 32 characters used for tests *) +module SKey = struct + let size = 32 + + let name = "key_string_32" + + include Index.Key.String_fixed (struct + let length = size + end) + + let gen = + let open QCheck2.Gen in + let size_gen = pure size in + let gen = string_size ~gen:printable size_gen in + graft_corners gen [String.init size (fun _ -> '\000')] () + + let gen_distinct distinct_keys = QCheck2.Gen.list_repeat distinct_keys gen + + let pp fmt s = Format.fprintf fmt "%S" s + + let encoding = Data_encoding.Fixed.string size +end + +(** Used for singleton store tests which do not need keys. *) +module NoKey = struct + type t = unit + + let _name = "no_key" + + let gen = QCheck2.Gen.pure () + + let pp _ () = () +end + +(** Module to generate values for the stores, as unbounded byte sequences. *) +module Value = struct + type t = bytes + + let name = "bytes_value" + + let gen = QCheck2.Gen.bytes + + let pp fmt b = Hex.of_bytes b |> Hex.show |> Format.fprintf fmt "%S" + + let encoding = Data_encoding.bytes +end + +(** Module to generate fixed size values for the stores, byte sequences (of size + 500 here). 
*) +module FixedValue = struct + type t = bytes + + let size = 500 + + let name = "fixed_bytes_value_500" + + let gen = + let open QCheck2.Gen in + let size_gen = pure size in + bytes_size size_gen + + let pp = Value.pp + + let encoding = Data_encoding.Fixed.bytes size +end + +module Action (Key : GENERATABLE) (Value : GENERATABLE) = struct + (** Actions for a key-value store whose keys are [Key.t] and values are + [Value.t]. *) + type t = Write of Key.t * Value.t | Read of Key.t | Delete of Key.t + + (** Generator for actions. The parameter [no_delete] indicates if the + generator should generate delete actions or not, because some append-only + stores do not support delete. *) + let gen ?(no_delete = false) k_gen = + let open QCheck2.Gen in + let* k = k_gen in + let write = + let+ v = Value.gen in + Write (k, v) + in + let read = pure (Read k) in + let delete = pure (Delete k) in + let l = if no_delete then [read; write] else [read; write; delete] in + oneof l + + let _gen_for_key k = gen (QCheck2.Gen.pure k) + + let _gen_simple = gen Key.gen + + let pp fmt = function + | Write (k, v) -> Format.fprintf fmt "+ %a -> %a" Key.pp k Value.pp v + | Read k -> Format.fprintf fmt "%a ?" Key.pp k + | Delete k -> Format.fprintf fmt "- %a" Key.pp k + + let key (Write (k, _) | Read k | Delete k) = k + + let parallelizable_with action parallel_actions = + match action with + | Read k -> + (* Can be in parallel with other reads, and writes on other keys than k *) + List.for_all + (function Read _ -> true | Write (k', _) | Delete k' -> k <> k') + parallel_actions + | Write (k, _) | Delete k -> + (* Can be in parallel with actions on other keys *) + List.for_all (fun a -> key a <> k) parallel_actions +end + +(** A scenario is a parallelizable list of sequence of actions. Sequential tests + have only a single list. 
*) +module Scenario (Key : GENERATABLE) (Value : GENERATABLE) = struct + module Action = Action (Key) (Value) + + module KeyMap = Map.Make (struct + type t = Key.t + + let compare = Stdlib.compare + end) + + let gen_sequence ?no_delete keys = + let open QCheck2.Gen in + let key_gen = oneofl keys in + let size = frequency [(95, small_nat); (5, nat)] in + list_size size (Action.gen ?no_delete key_gen) + + let gen_sequential ?no_delete keys = + let open QCheck2.Gen in + let+ sequence = gen_sequence ?no_delete keys in + List.map (fun a -> [a]) sequence + + let parallelize sequence = + let l = + List.fold_left + (fun acc action -> + match acc with + | parallel_actions :: previous + when Action.parallelizable_with action parallel_actions -> + (action :: parallel_actions) :: previous + | previous -> [action] :: previous) + [] + sequence + in + List.rev_map List.rev l + + let gen_parallel ?no_delete keys = + let open QCheck2.Gen in + let+ sequence = gen_sequence ?no_delete keys in + parallelize sequence + + let gen ?no_delete keys kind = + match kind with + | `Sequential -> gen_sequential ?no_delete keys + | `Parallel -> gen_parallel ?no_delete keys + + let pp_parallel fmt = + Format.fprintf fmt "[@[ %a@ @]]" + @@ Format.pp_print_list + ~pp_sep:(fun fmt () -> Format.fprintf fmt " ||@ ") + Action.pp + + let pp fmt = + Format.fprintf fmt "[@[ %a@ @]]" + @@ Format.pp_print_list + ~pp_sep:(fun fmt () -> Format.fprintf fmt " ;@ ") + pp_parallel + + let print = Format.asprintf "%a" pp +end + +(* Because a scenario creates files onto the disk, we need a way to + generate unique names. For debugging purpose, and because of the + shrinking of QCheck2, it is easier to track tries with a simple + counter. *) +let uid = ref 0 + +(** This functor produces a [check_run] functions that plays a scenario and runs + checks on it. 
*)
+module Runner
+    (Key : GENERATABLE)
+    (Value : GENERATABLE) (Store : sig
+      type t
+
+      val load : path:string -> t tzresult Lwt.t
+
+      val read : t -> Key.t -> Value.t option tzresult Lwt.t
+
+      val write : t -> Key.t -> Value.t -> unit tzresult Lwt.t
+
+      val delete : t -> Key.t -> unit tzresult Lwt.t
+
+      val close : t -> unit tzresult Lwt.t
+    end) =
+struct
+  module Scenario = Scenario (Key) (Value)
+
+  module KeySet = Set.Make (struct
+    type t = Key.t
+
+    let compare = Stdlib.compare
+  end)
+
+  (** Retrieve the last written value from a list of executed actions (from
+      newest to oldest). *)
+  let rec last_written_value key = function
+    | [] -> None
+    | Scenario.Action.Delete k :: _ when k = key -> None
+    | Write (k, v) :: _ when k = key -> Some v
+    | _ :: executed -> last_written_value key executed
+
+  let pp_opt_value =
+    Format.pp_print_option
+      ~none:(fun fmt () -> Format.pp_print_string fmt "[None]")
+      Value.pp
+
+  let check_read_value_last_write key res executed =
+    let open Lwt_result_syntax in
+    let last = last_written_value key executed in
+    if res <> last then
+      failwith
+        "Read %a for key %a, but last wrote %a@."
+        pp_opt_value
+        res
+        Key.pp
+        key
+        pp_opt_value
+        last
+    else return_unit
+
+  (** Checks that the value associated to [key] in the store is the last written
+      value in the list of executed actions. *)
+  let check_read_last_write store executed key =
+    let open Lwt_result_syntax in
+    let* res = Store.read store key in
+    check_read_value_last_write key res executed
+
+  (** Checks that the store and the witness agree on the value associated to
+      [key]. *)
+  let check_store_agree_witness store witness key =
+    let open Lwt_result_syntax in
+    let* store_res = Store.read store key in
+    let witness_res = Stdlib.Hashtbl.find_opt witness key in
+    if store_res <> witness_res then
+      failwith
+        "Read %a from store for key %a, but hash table witness contains \
+         %a@."
+        pp_opt_value
+        store_res
+        Key.pp
+        key
+        pp_opt_value
+        witness_res
+    else return_unit
+
+  let check_store_agree_witness store witness keys =
+    KeySet.iter_es (check_store_agree_witness store witness) keys
+
+  (** Always close loaded stores to avoid leak when some tests fail. *)
+  let with_store path f =
+    let open Lwt_result_syntax in
+    let* store = Store.load ~path in
+    Lwt.finalize (fun () -> f store) @@ fun () ->
+    let open Lwt_syntax in
+    let* _ = Store.close store in
+    return_unit
+
+  let run scenario =
+    let open Lwt_result_syntax in
+    incr uid ;
+    (* To avoid any conflict with previous runs of this test. *)
+    let pid = Unix.getpid () in
+    let path =
+      Filename.(concat @@ get_temp_dir_name ())
+        (Format.sprintf "tezos-layer2-indexed-store-test-%d-%d" pid !uid)
+    in
+    (* We use a hash table as a witness for the result of our scenario. Each
+       action is performed both on the witness (in memory) and the real
+       store. *)
+    (* Actions on the hash table witness. *)
+    let witness = Stdlib.Hashtbl.create 9 in
+    let last_witness_read = ref None in
+    let run_witness_action = function
+      | Scenario.Action.Write (k, v) -> Stdlib.Hashtbl.replace witness k v
+      | Read k ->
+          let res = Stdlib.Hashtbl.find_opt witness k in
+          last_witness_read := res
+      | Delete k -> Stdlib.Hashtbl.remove witness k
+    in
+    (* Actions on the real store. *)
+    let* keys =
+      with_store path @@ fun store ->
+      let last_store_read = ref None in
+      let run_store_action executed = function
+        | Scenario.Action.Write (k, v) -> Store.write store k v
+        | Read k ->
+            let* res = Store.read store k in
+            last_store_read := res ;
+            check_read_value_last_write k res executed
+        | Delete k -> Store.delete store k
+      in
+      (* Inner loop to run actions. It returns the keys of the scenario and the
+         executed actions.
*) + let rec run_actions keys executed = function + | [] -> return (keys, executed) + | parallel_actions :: rest -> + List.iter run_witness_action parallel_actions ; + let* () = + List.iter_ep (run_store_action executed) parallel_actions + in + let keys = + KeySet.add_seq + (List.to_seq parallel_actions |> Seq.map Scenario.Action.key) + keys + in + run_actions keys (parallel_actions @ executed) rest + in + let* keys, executed = run_actions KeySet.empty [] scenario in + (* Check that the read value is the last write for all keys at the end. *) + let* () = KeySet.iter_es (check_read_last_write store executed) keys in + (* Check that the store and witness agree at the end. *) + let* () = check_store_agree_witness store witness keys in + return keys + in + (* Reload the store to clear the caches (of the stores, etc.). We + then check that the version on disk still agrees with the witness. *) + let* () = + with_store path @@ fun store -> + check_store_agree_witness store witness keys + in + return keys + + let check_run scenario = + let promise = + let open Lwt_result_syntax in + let* _ = run scenario in + return_true + in + match Lwt_main.run promise with + | Ok _ -> true + | Error err -> + QCheck2.Test.fail_reportf "%a@." Error_monad.pp_print_trace err +end + +let tests = ref [] + +(** Small imperative helper to create test and register it, so we don't forget + one. 
*) +let register_test ?if_assumptions_fail ?count ?long_factor ?max_gen ?max_fail + ?retries ?name ?print ?collect ?stats get prop = + let t = + QCheck2.Test.make + ?if_assumptions_fail + ?count + ?long_factor + ?max_gen + ?max_fail + ?retries + ?name + ?print + ?collect + ?stats + get + prop + in + tests := t :: !tests + +let () = + let module Singleton_for_test = struct + module S = Make_singleton (Value) + + type t = rw S.t + + let load = S.load Read_write + + let read s () = S.read s + + let write s () v = S.write s v + + let delete s () = S.delete s + + let close _ = Lwt_result_syntax.return_unit + end in + let module R = Runner (NoKey) (Value) (Singleton_for_test) in + let test_gen = R.Scenario.gen [()] in + register_test + ~print:R.Scenario.print + ~name:"singleton store (sequential)" + ~count:2_000 + ~max_fail:1_000 (*to stop shrinking after [max_fail] failures. *) + (test_gen `Sequential) + R.check_run + +let () = + let module Indexable_for_test = struct + module S = + Make_indexable + (struct + let name = "indexable_test" + end) + (SKey) + (Make_index_value (Make_fixed_encodable (FixedValue))) + + type t = rw S.t + + let load = S.load Read_write + + let read s k = S.find s k + + let write s k v = S.add s k v + + let delete _ _ = assert false + + let close = S.close + end in + let module R = Runner (SKey) (FixedValue) (Indexable_for_test) in + let test_gen kind = + let open QCheck2.Gen in + let* n = int_range 2 10 in + let* keys = SKey.gen_distinct n in + R.Scenario.gen ~no_delete:true keys kind + in + register_test + ~print:R.Scenario.print + ~name:"indexable store (sequential)" + ~count:2_000 + ~max_fail:1_000 (*to stop shrinking after [max_fail] failures. *) + (test_gen `Sequential) + R.check_run ; + register_test + ~print:R.Scenario.print + ~name:"indexable store (parallel)" + ~count:2_000 + ~max_fail:1_000 (*to stop shrinking after [max_fail] failures. 
*) + (test_gen `Parallel) + R.check_run + +let () = + let module Indexable_for_test = struct + module S = + Make_indexable_removable + (struct + let name = "indexable_removable_test" + end) + (SKey) + (Make_index_value (Make_fixed_encodable (FixedValue))) + + type t = rw S.t + + let load = S.load Read_write + + let read s k = S.find s k + + let write s k v = S.add s k v + + let delete s k = S.remove s k + + let close = S.close + end in + let module R = Runner (SKey) (FixedValue) (Indexable_for_test) in + let test_gen kind = + let open QCheck2.Gen in + let* n = int_range 2 10 in + let* keys = SKey.gen_distinct n in + R.Scenario.gen keys kind + in + register_test + ~print:R.Scenario.print + ~name:"indexable removable store (sequential)" + ~count:2_000 + ~max_fail:1_000 (*to stop shrinking after [max_fail] failures. *) + (test_gen `Sequential) + R.check_run ; + register_test + ~print:R.Scenario.print + ~name:"indexable removable store (parallel)" + ~count:2_000 + ~max_fail:1_000 (*to stop shrinking after [max_fail] failures. 
*) + (test_gen `Parallel) + R.check_run + +let () = + let module Indexed_file_for_test = struct + module S = + Make_simple_indexed_file + (struct + let name = "indexed_file" + end) + (Make_index_key (struct + include Make_fixed_encodable (SKey) + + let equal = String.equal + end)) + (struct + include Value + + module Header = struct + type t = int32 + + let name = "sum_chars" + + let encoding = Data_encoding.int32 + + let fixed_size = 4 + end + + (* Header contains sum of byte codes as an example *) + let header b = + Bytes.fold_left (fun n c -> n + Char.code c) 0 b |> Int32.of_int + end) + + type t = rw S.t + + let load ~path = S.load ~path ~cache_size:10 Read_write + + open Lwt_result_syntax + + let read s k = + let+ v = S.read s k in + Option.map fst v + + let write s k v = S.append s ~key:k ~value:v + + let delete _ _ = assert false + + let close = S.close + end in + let module R = Runner (SKey) (Value) (Indexed_file_for_test) in + let test_gen kind = + let open QCheck2.Gen in + let* n = int_range 2 10 in + let* keys = SKey.gen_distinct n in + R.Scenario.gen ~no_delete:true keys kind + in + register_test + ~print:R.Scenario.print + ~name:"indexed file store (sequential)" + ~count:2_000 + ~max_fail:1_000 (*to stop shrinking after [max_fail] failures. *) + (test_gen `Sequential) + R.check_run ; + register_test + ~print:R.Scenario.print + ~name:"indexed file store (parallel)" + ~count:2_000 + ~max_fail:1_000 (*to stop shrinking after [max_fail] failures. *) + (test_gen `Parallel) + R.check_run + +let () = QCheck_base_runner.run_tests_main (List.rev !tests)