From 24d27b942e80ae98e07c04c2cff38304fff693f2 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Tue, 24 Sep 2024 10:46:20 +0200 Subject: [PATCH 01/21] Rollup node/store: initial SQL schema --- .../migrations/000_initial.sql | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 src/lib_smart_rollup_node/migrations/000_initial.sql diff --git a/src/lib_smart_rollup_node/migrations/000_initial.sql b/src/lib_smart_rollup_node/migrations/000_initial.sql new file mode 100644 index 000000000000..1a41c2419a0e --- /dev/null +++ b/src/lib_smart_rollup_node/migrations/000_initial.sql @@ -0,0 +1,102 @@ +-------------------------------------------------------- +-- SPDX-License-Identifier: MIT -- +-- Copyright (c) 2024 Functori -- +-------------------------------------------------------- + +-- For storing levels such as LCC, LPC, GC levels, etc. +-- and information about the head, finalized_level, etc. +CREATE TABLE rollup_node_state ( + name VARCHAR PRIMARY KEY, + value VARCHAR, + level INTEGER +); + +CREATE TABLE commitments ( + hash VARCHAR(54) PRIMARY KEY, + compressed_state VARCHAR(54) NOT NULL, + inbox_level INTEGER NOT NULL, + predecessor VARCHAR(54) NOT NULL, + number_of_ticks INTEGER NOT NULL +); + +CREATE INDEX idx_commitments_inbox_level +ON commitments(inbox_level); + +CREATE TABLE commitments_published_at_levels ( + commitment_hash VARCHAR(54) PRIMARY KEY, + first_published_at_level INTEGER NOT NULL, + published_at_level INTEGER +); + +CREATE TABLE inboxes ( + hash VARCHAR(55) PRIMARY KEY, + inbox_level INTEGER NOT NULL, + history_proof BLOB NOT NULL +); + +CREATE INDEX idx_inboxes_inbox_level +ON inboxes(inbox_level); + +CREATE TABLE messages ( + payload_hashes_hash VARCHAR(55) PRIMARY KEY, + inbox_level INTEGER NOT NULL, + message_list BLOB NOT NULL +); + +CREATE INDEX idx_messages_inbox_level +ON messages(inbox_level); + +CREATE TABLE outbox_messages ( + outbox_level INTEGER PRIMARY KEY, + messages VARCHAR NOT NULL, + executed_messages VARCHAR NOT NULL +); + +CREATE INDEX idx_outbox_messages_outbox_level +ON outbox_messages(outbox_level); + +CREATE TABLE protocols ( + hash VARCHAR(51) PRIMARY KEY, + proto_level INTEGER NOT NULL, + first_level INTEGER NOT NULL, + first_is_activation BOOLEAN NOT NULL +); + +CREATE INDEX idx_protocols_first_level +ON protocols(first_level); + +CREATE TABLE dal_slots_headers ( + block_hash VARCHAR(51) NOT NULL, + slot_index INTEGER NOT NULL, + published_level INTEGER NOT NULL, + slot_commitment VARCHAR NOT NULL, + PRIMARY KEY(block_hash, slot_index) +); + +CREATE TABLE dal_slots_statuses ( + block_hash VARCHAR(51) NOT NULL, + slot_index INTEGER NOT NULL, + attested BOOLEAN NOT NULL, + PRIMARY KEY(block_hash, slot_index) +); + +CREATE TABLE l2_blocks ( + block_hash VARCHAR(51) PRIMARY KEY, + level INTEGER NOT NULL, + predecessor VARCHAR(51) NOT NULL , + commitment_hash VARCHAR(54) , + previous_commitment_hash VARCHAR(54) NOT NULL, + context VARCHAR(54) NOT NULL, + inbox_witness VARCHAR(55) NOT NULL, + inbox_hash VARCHAR(55) NOT NULL , + initial_tick VARCHAR NOT NULL, + num_ticks INTEGER NOT NULL +); + +CREATE INDEX idx_l2_blocks_level +ON l2_blocks(level); + +CREATE TABLE l2_levels ( + level INTEGER PRIMARY KEY, + block_hash VARCHAR(51) NOT NULL +); -- GitLab From 30126b8eefdb6dbe54f814706bf83a86815c6f53 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Tue, 24 Sep 2024 11:09:36 +0200 Subject: [PATCH 02/21] Build/Rollup node: use SQLite library --- manifest/product_octez.ml | 33 +++++++++++++++++++++++ opam/octez-l2-libs.opam | 2 ++ 
src/lib_smart_rollup/index.mld | 1 + src/lib_smart_rollup_node/dune | 3 +++ src/lib_smart_rollup_node/migrations/dune | 19 +++++++++++++ 5 files changed, 58 insertions(+) create mode 100644 src/lib_smart_rollup_node/migrations/dune diff --git a/manifest/product_octez.ml b/manifest/product_octez.ml index 24c2c28d02f9..08f10b34e221 100644 --- a/manifest/product_octez.ml +++ b/manifest/product_octez.ml @@ -4645,6 +4645,37 @@ let octez_smart_rollup_lib = yaml; ] +let rollup_node_sqlite_migrations = + octez_l2_lib + "rollup_node_sqlite_migrations" + ~path:"src/lib_smart_rollup_node/migrations" + ~synopsis:"SQL migrations for the Rollup node store" + ~deps:[octez_base |> open_ ~m:"TzPervasives"; caqti_lwt; crunch; re] + ~dune: + Dune. + [ + [ + S "rule"; + [S "target"; S "migrations.ml"]; + [S "deps"; [S "glob_files"; S "*.sql"]]; + [ + S "action"; + [ + S "run"; + S "ocaml-crunch"; + S "-e"; + S "sql"; + S "-m"; + S "plain"; + S "-o"; + S "%{target}"; + S "-s"; + S "."; + ]; + ]; + ]; + ] + let octez_smart_rollup_node_lib = public_lib "octez-smart-rollup-node-lib" @@ -4669,6 +4700,8 @@ let octez_smart_rollup_node_lib = octez_injector_lib |> open_; octez_version_value |> open_; octez_layer2_store |> open_; + rollup_node_sqlite_migrations; + octez_sqlite |> open_; octez_crawler |> open_; octez_workers |> open_; octez_smart_rollup_lib |> open_; diff --git a/opam/octez-l2-libs.opam b/opam/octez-l2-libs.opam index 708481e19358..f7f1c974f725 100644 --- a/opam/octez-l2-libs.opam +++ b/opam/octez-l2-libs.opam @@ -28,6 +28,8 @@ depends: [ "caqti-lwt" { >= "2.0.1" } "caqti-driver-sqlite3" { >= "2.0.1" } "yaml" { >= "3.1.0" } + "crunch" { >= "3.3.0" } + "re" { >= "1.10.0" } "ppx_import" "qcheck-alcotest" { >= "0.20" } "octez-alcotezt" { = version } diff --git a/src/lib_smart_rollup/index.mld b/src/lib_smart_rollup/index.mld index 9e086f41b74c..22b5226e17fa 100644 --- a/src/lib_smart_rollup/index.mld +++ b/src/lib_smart_rollup/index.mld @@ -7,6 +7,7 @@ It contains the following libraries: - {{!module-Octez_smart_rollup}Octez_smart_rollup}: Library for Smart Rollups - {{!module-Octez_smart_rollup_wasm_benchmark_lib}Octez_smart_rollup_wasm_benchmark_lib}: Smart Rollup WASM benchmark library - {{!module-Octez_sqlite}Octez_sqlite}: SQLite wrappers and helpers +- {{!module-Rollup_node_sqlite_migrations}Rollup_node_sqlite_migrations}: SQL migrations for the Rollup node store - {{!module-Tezos_layer2_store}Tezos_layer2_store}: layer2 storage utils - {{!module-Tezos_scoru_wasm}Tezos_scoru_wasm} - {{!module-Tezos_scoru_wasm_durable_snapshot}Tezos_scoru_wasm_durable_snapshot}: Durable storage reference implementation diff --git a/src/lib_smart_rollup_node/dune b/src/lib_smart_rollup_node/dune index 6d726dba0667..819e6ee31241 100644 --- a/src/lib_smart_rollup_node/dune +++ b/src/lib_smart_rollup_node/dune @@ -22,6 +22,8 @@ octez-injector octez-version.value octez-l2-libs.layer2_store + octez-l2-libs.rollup_node_sqlite_migrations + octez-l2-libs.sqlite octez-crawler octez-libs.tezos-workers octez-l2-libs.smart-rollup) @@ -39,6 +41,7 @@ -open Octez_injector -open Tezos_version_value -open Tezos_layer2_store + -open Octez_sqlite -open Octez_crawler -open Tezos_workers -open Octez_smart_rollup)) diff --git a/src/lib_smart_rollup_node/migrations/dune b/src/lib_smart_rollup_node/migrations/dune new file mode 100644 index 000000000000..3fa53bad87f4 --- /dev/null +++ b/src/lib_smart_rollup_node/migrations/dune @@ -0,0 +1,19 @@ +; This file was automatically generated, do not edit. +; Edit file manifest/main.ml instead. 
+ +(library + (name rollup_node_sqlite_migrations) + (public_name octez-l2-libs.rollup_node_sqlite_migrations) + (instrumentation (backend bisect_ppx)) + (libraries + octez-libs.base + caqti-lwt + re) + (flags + (:standard) + -open Tezos_base.TzPervasives)) + +(rule + (target migrations.ml) + (deps (glob_files *.sql)) + (action (run ocaml-crunch -e sql -m plain -o %{target} -s .))) -- GitLab From e5b9cac3f3c250afb8528ab10b4b732bd0aa814a Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Tue, 24 Sep 2024 10:57:46 +0200 Subject: [PATCH 03/21] Rollup node/store: SQLite store migration logic This is the same code as the EVM store migration logic but they could diverge at some point. --- .../rollup_node_sqlite_migrations.ml | 49 +++++++++++++++++++ .../rollup_node_sqlite_migrations.mli | 20 ++++++++ 2 files changed, 69 insertions(+) create mode 100644 src/lib_smart_rollup_node/migrations/rollup_node_sqlite_migrations.ml create mode 100644 src/lib_smart_rollup_node/migrations/rollup_node_sqlite_migrations.mli diff --git a/src/lib_smart_rollup_node/migrations/rollup_node_sqlite_migrations.ml b/src/lib_smart_rollup_node/migrations/rollup_node_sqlite_migrations.ml new file mode 100644 index 000000000000..9f2b82ca072a --- /dev/null +++ b/src/lib_smart_rollup_node/migrations/rollup_node_sqlite_migrations.ml @@ -0,0 +1,49 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* Copyright (c) 2024 Nomadic Labs *) +(* *) +(*****************************************************************************) + +open Caqti_request.Infix +open Caqti_type.Std + +let re_t = Re.(compile @@ Posix.re "[0-9]{3}_([a-z0-9_]+).sql") + +module type S = sig + val name : string + + val apply : (unit, unit, [`Zero]) Caqti_request.t list +end + +type migration = (module S) + +let make_migration path = + let read_exn path = + match Migrations.read path with + | Some content -> content + | None -> raise (Invalid_argument "read_exn") + in + + let migration_name = + let open Re in + let (group : Group.t) = exec re_t path in + Group.get group 1 + in + + let migration_step = ( @@ ) (unit ->. unit) in + + let make_migration_requests str = + String.split ';' str + |> List.filter_map (fun i -> + match String.trim i with "" -> None | x -> Some (migration_step x)) + in + + (module struct + let name = migration_name + + let apply = make_migration_requests (read_exn path) + end : S) + +let migrations version = + List.take_n (version + 1) Migrations.file_list |> List.map make_migration diff --git a/src/lib_smart_rollup_node/migrations/rollup_node_sqlite_migrations.mli b/src/lib_smart_rollup_node/migrations/rollup_node_sqlite_migrations.mli new file mode 100644 index 000000000000..12cd6da1b8c6 --- /dev/null +++ b/src/lib_smart_rollup_node/migrations/rollup_node_sqlite_migrations.mli @@ -0,0 +1,20 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* Copyright (c) 2024 Nomadic Labs *) +(* *) +(*****************************************************************************) + +(* Top-level module shadowing the other modules of the library. *) + +module type S = sig + val name : string + + val apply : (unit, unit, [`Zero]) Caqti_request.t list +end + +type migration = (module S) + +(** [migrations v] returns the list of migrations to apply to obtain version [v] + from an empty database. 
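+    Each migration corresponds to one embedded [NNN_name.sql] file; version
+    [0] corresponds to the initial schema ([000_initial.sql]).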
*) +val migrations : int -> migration list -- GitLab From 4e6a905003440198c0a369fdaadf5d820ea55884 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 14:44:08 +0200 Subject: [PATCH 04/21] Rollup node/store: SQLite store implementation skeleton --- src/lib_smart_rollup_node/sql_store.ml | 177 ++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 28 ++++ src/lib_sqlite/sqlite.ml | 2 + src/lib_sqlite/sqlite.mli | 2 + 4 files changed, 209 insertions(+) create mode 100644 src/lib_smart_rollup_node/sql_store.ml create mode 100644 src/lib_smart_rollup_node/sql_store.mli diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml new file mode 100644 index 000000000000..189e732d791e --- /dev/null +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -0,0 +1,177 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* Copyright (c) 2024 Functori *) +(* *) +(*****************************************************************************) + +open Caqti_request.Infix +open Caqti_type.Std + +module Events = struct + include Internal_event.Simple + + let section = ["smart_rollup_node"; "store"] + + let init_store = + declare_0 + ~section + ~name:"smart_rollup_node_store_init" + ~msg:"Store is being initialized for the first time" + ~level:Notice + () + + let applied_migration = + declare_1 + ~section + ~name:"smart_rollup_node_store_applied_migration" + ~msg:"Applied migration {name} to the store" + ~level:Notice + ("name", Data_encoding.string) + + let migrations_from_the_future = + declare_2 + ~section + ~name:"smart_rollup_node_migrations_from_the_future" + ~msg: + "Store has {applied} migrations applied but the rollup node is only \ + aware of {known}" + ~level:Error + ("applied", Data_encoding.int31) + ("known", Data_encoding.int31) +end + +let with_connection store conn = + match conn with + | Some conn -> Sqlite.with_connection conn + | None -> + fun k -> Sqlite.use store @@ fun conn -> Sqlite.with_connection conn k + +let with_transaction store k = + Sqlite.use store @@ fun conn -> Sqlite.with_transaction conn k + +let table_exists_req = + (string ->! bool) + @@ {sql| + SELECT EXISTS ( + SELECT name FROM sqlite_master + WHERE type='table' + AND name=? + )|sql} + +module Migrations = struct + module Q = struct + let create_table = + (unit ->. unit) + @@ {sql| + CREATE TABLE migrations ( + id SERIAL PRIMARY KEY, + name TEXT + )|sql} + + let current_migration = + (unit ->? int) @@ {|SELECT id FROM migrations ORDER BY id DESC LIMIT 1|} + + let register_migration = + (t2 int string ->. unit) + @@ {sql| + INSERT INTO migrations (id, name) VALUES (?, ?) + |sql} + + let version = 0 + + let all : Rollup_node_sqlite_migrations.migration list = + Rollup_node_sqlite_migrations.migrations version + end + + let create_table store = + Sqlite.with_connection store @@ fun conn -> + Sqlite.Db.exec conn Q.create_table () + + let table_exists store = + Sqlite.with_connection store @@ fun conn -> + Sqlite.Db.find conn table_exists_req "migrations" + + let missing_migrations store = + let open Lwt_result_syntax in + let all_migrations = List.mapi (fun i m -> (i, m)) Q.all in + let* current = + Sqlite.with_connection store @@ fun conn -> + Sqlite.Db.find_opt conn Q.current_migration () + in + match current with + | Some current -> + let applied = current + 1 in + let known = List.length all_migrations in + if applied <= known then return (List.drop_n applied all_migrations) + else + let*! 
() = + Events.(emit migrations_from_the_future) (applied, known) + in + failwith + "Cannot use a store modified by a more up-to-date version of the \ + EVM node" + | None -> return all_migrations + + let apply_migration store id (module M : Rollup_node_sqlite_migrations.S) = + let open Lwt_result_syntax in + Sqlite.with_connection store @@ fun conn -> + let* () = List.iter_es (fun up -> Sqlite.Db.exec conn up ()) M.apply in + Sqlite.Db.exec conn Q.register_migration (id, M.name) +end + +let sqlite_file_name = "store.sqlite" + +type 'a t = Sqlite.t + +type rw = Store_sigs.rw t + +type ro = Store_sigs.ro t + +let init (type m) (mode : m Store_sigs.mode) ~data_dir : m t tzresult Lwt.t = + let open Lwt_result_syntax in + let path = Filename.concat data_dir sqlite_file_name in + let*! exists = Lwt_unix.file_exists path in + let migration conn = + Sqlite.assert_in_transaction conn ; + let* () = + if not exists then + let* () = Migrations.create_table conn in + let*! () = Events.(emit init_store) () in + return_unit + else + let* table_exists = Migrations.table_exists conn in + let* () = + when_ (not table_exists) (fun () -> + failwith "A store already exists, but its content is incorrect.") + in + return_unit + in + let* migrations = Migrations.missing_migrations conn in + let*? () = + match (mode, migrations) with + | Read_only, _ :: _ -> + error_with + "The store has %d missing migrations but was opened in read-only \ + mode." + (List.length migrations) + | _, _ -> Ok () + in + let* () = + List.iter_es + (fun (i, ((module M : Rollup_node_sqlite_migrations.S) as mig)) -> + let* () = Migrations.apply_migration conn i mig in + let*! () = Events.(emit applied_migration) M.name in + return_unit) + migrations + in + return_unit + in + let perm = + match mode with Read_only -> `Read_only | Read_write -> `Read_write + in + Sqlite.init ~path ~perm migration + +let close store = Sqlite.close store + +let readonly (store : Store_sigs.rw t) : Store_sigs.ro t = store diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli new file mode 100644 index 000000000000..babbbfbc4972 --- /dev/null +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -0,0 +1,28 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* Copyright (c) 2024 Functori *) +(* *) +(*****************************************************************************) + +(** Type of store (a handler to the underlying database). *) +type 'a t + +(** Read/write store {!t}. *) +type rw = Store_sigs.rw t + +(** Read only store {!t}. *) +type ro = Store_sigs.ro t + +(** Name of SQLite file in data directory. *) +val sqlite_file_name : string + +(** [init mode ~data_dir] initializes the store and returns it. *) +val init : 'a Store_sigs.mode -> data_dir:string -> 'a t tzresult Lwt.t + +(** Close the store by freeing all resources and closing database + connections. *) +val close : _ t -> unit Lwt.t + +(** Returns a read-only version of the store. 
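+    In the SQLite implementation this is the identity on the underlying
+    handle; only the phantom type parameter changes. A minimal usage sketch
+    (the [Store_sigs.Read_write] constructor and the [data_dir] value are
+    assumed to be in scope):
+    {[
+      let open Lwt_result_syntax in
+      (* Open (or create) the store read-write, applying missing migrations. *)
+      let* rw_store = init Store_sigs.Read_write ~data_dir in
+      (* Components that must not write only get a read-only view. *)
+      let ro_store = readonly rw_store in
+      let* head = L2_blocks.find_head ro_store in
+      ...
+    ]}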
*) +val readonly : _ t -> ro diff --git a/src/lib_sqlite/sqlite.ml b/src/lib_sqlite/sqlite.ml index 5cec0ae5f143..aab2fc1e43f5 100644 --- a/src/lib_sqlite/sqlite.ml +++ b/src/lib_sqlite/sqlite.ml @@ -209,3 +209,5 @@ let init ~path ~perm migration_code = let* () = set_wal_journal_mode conn in let* () = with_transaction conn migration_code in return store + +let close (Pool {db_pool}) = Caqti_lwt_unix.Pool.drain db_pool diff --git a/src/lib_sqlite/sqlite.mli b/src/lib_sqlite/sqlite.mli index 8d07ec8e7c7d..a07026611549 100644 --- a/src/lib_sqlite/sqlite.mli +++ b/src/lib_sqlite/sqlite.mli @@ -29,6 +29,8 @@ val init : (conn -> unit tzresult Lwt.t) -> t tzresult Lwt.t +val close : t -> unit Lwt.t + (** Rebuild the database in [output_db_file] using the {{:https://www.sqlite.org/lang_vacuum.html}[VACUUM] sqlite command}. This function is useful to backup the database. *) -- GitLab From cb80d07443eedf07d20e87852ac0ba9513fda352 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 14:48:16 +0200 Subject: [PATCH 05/21] Rollup node store: Commitments SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 101 ++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 35 ++++++++ 2 files changed, 136 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index 189e732d791e..a69520093af1 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -50,6 +50,39 @@ let with_connection store conn = let with_transaction store k = Sqlite.use store @@ fun conn -> Sqlite.with_transaction conn k +module Types = struct + let level = int32 + + let tzcustom ~encode ~decode t = + custom + ~encode:(fun v -> Ok (encode v)) + ~decode:(fun s -> + decode s + |> Result.map_error (fun err -> Format.asprintf "%a" pp_print_trace err)) + t + + let commitment_hash = + tzcustom + ~encode:Commitment.Hash.to_b58check + ~decode:Commitment.Hash.of_b58check + string + + let state_hash = + tzcustom + ~encode:State_hash.to_b58check + ~decode:State_hash.of_b58check + string + + let commitment = + product (fun compressed_state inbox_level predecessor number_of_ticks -> + Commitment.{compressed_state; inbox_level; predecessor; number_of_ticks}) + @@ proj state_hash (fun c -> c.Commitment.compressed_state) + @@ proj level (fun c -> c.Commitment.inbox_level) + @@ proj commitment_hash (fun c -> c.Commitment.predecessor) + @@ proj int64 (fun c -> c.Commitment.number_of_ticks) + @@ proj_end +end + let table_exists_req = (string ->! bool) @@ {sql| @@ -175,3 +208,71 @@ let init (type m) (mode : m Store_sigs.mode) ~data_dir : m t tzresult Lwt.t = let close store = Sqlite.close store let readonly (store : Store_sigs.rw t) : Store_sigs.ro t = store + +module Commitments = struct + module Q = struct + open Types + + let insert = + (t2 commitment_hash commitment ->. unit) + @@ {sql| + REPLACE INTO commitments + (hash, compressed_state, inbox_level, predecessor, number_of_ticks) + VALUES (?, ?, ?, ?, ?) + |sql} + + let select = + (commitment_hash ->? commitment) + @@ {sql| + SELECT compressed_state, inbox_level, predecessor, number_of_ticks + FROM commitments + WHERE hash = ? + |sql} + + let delete_before = + (level ->. unit) + @@ {sql| + DELETE FROM commitments WHERE inbox_level < ? + |sql} + + let lcc = + (unit ->? commitment) + @@ {sql| + SELECT compressed_state, inbox_level, predecessor, number_of_ticks + FROM commitments + INNER JOIN rollup_node_state + ON name = "lcc" AND value = hash + |sql} + + let lpc = + (unit ->? 
commitment) + @@ {sql| + SELECT compressed_state, inbox_level, predecessor, number_of_ticks + FROM commitments + INNER JOIN rollup_node_state + ON name = "lpc" AND value = hash + |sql} + end + + let store ?conn store commitment = + let open Lwt_result_syntax in + with_connection store conn @@ fun conn -> + let hash = Commitment.hash commitment in + let+ () = Sqlite.Db.exec conn Q.insert (hash, commitment) in + hash + + let find ?conn store commitment_hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select commitment_hash + + let find_lcc ?conn store commitment_hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.lcc commitment_hash + + let find_lpc ?conn store = + with_connection store conn @@ fun conn -> Sqlite.Db.find_opt conn Q.lpc () + + let delete_before ?conn store ~level = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.delete_before level +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index babbbfbc4972..6447222d85b0 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -26,3 +26,38 @@ val close : _ t -> unit Lwt.t (** Returns a read-only version of the store. *) val readonly : _ t -> ro + +(** {2 Modules for storing data in the store} + + Each module maps to a table in the database. Every read and write function + has two parameters: [store] and an optional [?conn]. + + When [~conn] is provided, the database connection is used directly to + execute the queries. If not, a new (or old) connection is taken from the + pool of [store]. +*) + +(** Storage containing commitments and corresponding commitment hashes that the + rollup node has knowledge of. *) +module Commitments : sig + (** [store ?conn s commitment] stores a new [commitment] associated to its + hash and returns it. *) + val store : + ?conn:Sqlite.conn -> rw -> Commitment.t -> Commitment.Hash.t tzresult Lwt.t + + (** Retrieve a commitment by its hash. *) + val find : + ?conn:Sqlite.conn -> + _ t -> + Commitment.Hash.t -> + Commitment.t option tzresult Lwt.t + + (** Retrieve a commitment corresponding to the LCC, the last cemented + commitment. *) + val find_lcc : + ?conn:Sqlite.conn -> _ t -> unit -> Commitment.t option tzresult Lwt.t + + (** Retrieve the last commitment published by the rollup node + (as known from the handled L1 blocks). 
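+      Returns [None] if no published commitment is recorded in the store.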
*) + val find_lpc : ?conn:Sqlite.conn -> _ t -> Commitment.t option tzresult Lwt.t +end -- GitLab From 8fcfa985b8bf46713b569bea1f073c868a250422 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 14:49:41 +0200 Subject: [PATCH 06/21] Rollup node store: Commitments publication levels SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 66 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 33 +++++++++++++ 2 files changed, 99 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index a69520093af1..539e8c1af8b3 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -276,3 +276,69 @@ module Commitments = struct with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.delete_before level end + +module Commitments_published_at_levels = struct + type publication_levels = { + first_published_at_level : int32; + published_at_level : int32 option; + } + + module Q = struct + open Types + + let publication_levels = + product (fun first_published_at_level published_at_level -> + {first_published_at_level; published_at_level}) + @@ proj level (fun p -> p.first_published_at_level) + @@ proj (option level) (fun p -> p.published_at_level) + @@ proj_end + + let register = + (t2 commitment_hash publication_levels ->. unit) + @@ {sql| + REPLACE INTO commitments_published_at_levels + (commitment_hash, first_published_at_level, published_at_level) + VALUES (?, ?, ?) + |sql} + + let select = + (commitment_hash ->? publication_levels) + @@ {sql| + SELECT first_published_at_level, published_at_level + FROM commitments_published_at_levels + WHERE commitment_hash = ? + |sql} + + let first_published = + (commitment_hash ->? level) + @@ {sql| + SELECT first_published_at_level + FROM commitments_published_at_levels + WHERE commitment_hash = ? + AND first_published_at_level IS NOT NULL + |sql} + + let delete_before = + (level ->. unit) + @@ {sql| + DELETE FROM commitments_published_at_levels + WHERE first_published_at_level < ? + |sql} + end + + let register ?conn store commitment levels = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.register (commitment, levels) + + let get ?conn store commitment = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select commitment + + let get_first_published_level ?conn store commitment = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.first_published commitment + + let delete_before ?conn store ~level = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.delete_before level +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index 6447222d85b0..fcff56a99391 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -61,3 +61,36 @@ module Commitments : sig (as known from the handled L1 blocks). *) val find_lpc : ?conn:Sqlite.conn -> _ t -> Commitment.t option tzresult Lwt.t end + +(** Storage mapping commitment hashes to the level when they were published by + the rollup node. *) +module Commitments_published_at_levels : sig + type publication_levels = { + first_published_at_level : int32; + (** The level at which this commitment was first published. *) + published_at_level : int32 option; + (** The level at which we published this commitment. If + [first_published_at_level <> published_at_level] it means that the + commitment is republished. 
*) + } + + (** [register ?conn s hash levels] stores the publication levels for + commitment whose hash is [hash]. *) + val register : + ?conn:Sqlite.conn -> + rw -> + Commitment.Hash.t -> + publication_levels -> + unit tzresult Lwt.t + + (** Retrieve the publication levels for a commitment by its hash. *) + val get : + ?conn:Sqlite.conn -> + _ t -> + Commitment.Hash.t -> + publication_levels option tzresult Lwt.t + + (** Retrieve the level at which a commitment was first published. *) + val get_first_published_level : + ?conn:Sqlite.conn -> _ t -> Commitment.Hash.t -> int32 option tzresult Lwt.t +end -- GitLab From e0fa6318ca05b71070dea0c70b1ecc13fd98a746 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 14:55:43 +0200 Subject: [PATCH 07/21] Rollup node store: Inboxes SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 89 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 15 +++++ 2 files changed, 104 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index 539e8c1af8b3..b389a7791478 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -53,6 +53,24 @@ let with_transaction store k = module Types = struct let level = int32 + let from_encoding ~name encoding = + custom + ~encode:(fun v -> + Data_encoding.Binary.to_string encoding v + |> Result.map_error + (Format.asprintf + "Fail to encode %s for database: %a" + name + Data_encoding.Binary.pp_write_error)) + ~decode:(fun s -> + Data_encoding.Binary.of_string encoding s + |> Result.map_error + (Format.asprintf + "Fail to decode %s in database: %a" + name + Data_encoding.Binary.pp_read_error)) + octets + let tzcustom ~encode ~decode t = custom ~encode:(fun v -> Ok (encode v)) @@ -61,6 +79,12 @@ module Types = struct |> Result.map_error (fun err -> Format.asprintf "%a" pp_print_trace err)) t + let inbox_hash = + tzcustom + ~encode:Inbox_hash.to_b58check + ~decode:Inbox_hash.of_b58check + string + let commitment_hash = tzcustom ~encode:Commitment.Hash.to_b58check @@ -73,6 +97,9 @@ module Types = struct ~decode:State_hash.of_b58check string + let history_proof = + from_encoding ~name:"Inbox.history_proof" Inbox.history_proof_encoding + let commitment = product (fun compressed_state inbox_level predecessor number_of_ticks -> Commitment.{compressed_state; inbox_level; predecessor; number_of_ticks}) @@ -81,6 +108,12 @@ module Types = struct @@ proj commitment_hash (fun c -> c.Commitment.predecessor) @@ proj int64 (fun c -> c.Commitment.number_of_ticks) @@ proj_end + + let inbox = + product (fun level m -> Inbox.{level; old_levels_messages = m}) + @@ proj level (fun i -> i.Inbox.level) + @@ proj history_proof (fun i -> i.Inbox.old_levels_messages) + @@ proj_end end let table_exists_req = @@ -342,3 +375,59 @@ module Commitments_published_at_levels = struct with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.delete_before level end + +module Inboxes = struct + module Q = struct + open Types + + let insert = + (t2 inbox_hash inbox ->. unit) + @@ {sql| + REPLACE INTO inboxes + (hash, inbox_level, history_proof) + VALUES (?, ?, ?) + |sql} + + let select = + (inbox_hash ->? inbox) + @@ {sql| + SELECT inbox_level, history_proof + FROM inboxes + WHERE hash = ? + |sql} + + let select_by_block_hash = + (block_hash ->? inbox) + @@ {sql| + SELECT i.inbox_level, i.history_proof + FROM inboxes as i + INNER JOIN l2_blocks as b + ON b.inbox_hash = i.hash AND b.block_hash = ? 
+ |sql} + + let delete_before = + (level ->. unit) + @@ {sql| + DELETE FROM inboxes WHERE inbox_level < ? + |sql} + end + + let store ?conn store inbox = + let open Lwt_result_syntax in + with_connection store conn @@ fun conn -> + let hash = Inbox.hash inbox in + let+ () = Sqlite.Db.exec conn Q.insert (hash, inbox) in + hash + + let find ?conn store inbox_hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select inbox_hash + + let find_by_block_hash ?conn store inbox_hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select_by_block_hash inbox_hash + + let delete_before ?conn store ~level = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.delete_before level +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index fcff56a99391..fa9c1582be31 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -94,3 +94,18 @@ module Commitments_published_at_levels : sig val get_first_published_level : ?conn:Sqlite.conn -> _ t -> Commitment.Hash.t -> int32 option tzresult Lwt.t end + +(** Aggregated collection of messages from the L1 inbox *) +module Inboxes : sig + (** [store ?conn s inbox] stores the [inbox], associated to its hash and + returns it. *) + val store : ?conn:Sqlite.conn -> rw -> Inbox.t -> Inbox_hash.t tzresult Lwt.t + + (** Retrieve an inbox by its hash. *) + val find : + ?conn:Sqlite.conn -> _ t -> Inbox_hash.t -> Inbox.t option tzresult Lwt.t + + (** Retrieve an inbox by its the block hash in which it appeared. *) + val find_by_block_hash : + ?conn:Sqlite.conn -> _ t -> Block_hash.t -> Inbox.t option tzresult Lwt.t +end -- GitLab From e23a189cc69e42c24d742cfc3fec2bf2bd62e27a Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 14:56:34 +0200 Subject: [PATCH 08/21] Rollup node store: Messages SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 51 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 20 ++++++++++ 2 files changed, 71 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index b389a7791478..b8f330c6e4e6 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -85,6 +85,12 @@ module Types = struct ~decode:Inbox_hash.of_b58check string + let payload_hashes_hash = + tzcustom + ~encode:Merkelized_payload_hashes_hash.to_b58check + ~decode:Merkelized_payload_hashes_hash.of_b58check + string + let commitment_hash = tzcustom ~encode:Commitment.Hash.to_b58check @@ -100,6 +106,11 @@ module Types = struct let history_proof = from_encoding ~name:"Inbox.history_proof" Inbox.history_proof_encoding + let messages_list = + from_encoding + ~name:"message list" + Data_encoding.(list @@ dynamic_size (Variable.string' Hex)) + let commitment = product (fun compressed_state inbox_level predecessor number_of_ticks -> Commitment.{compressed_state; inbox_level; predecessor; number_of_ticks}) @@ -431,3 +442,43 @@ module Inboxes = struct with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.delete_before level end + +module Messages = struct + module Q = struct + open Types + + let insert = + (t3 payload_hashes_hash level messages_list ->. unit) + @@ {sql| + REPLACE INTO messages + (payload_hashes_hash, inbox_level, message_list) + VALUES (?, ?, ?) + |sql} + + let select = + (payload_hashes_hash ->? 
t2 level messages_list) + @@ {sql| + SELECT inbox_level, message_list + FROM messages + WHERE payload_hashes_hash = ? + |sql} + + let delete_before = + (level ->. unit) + @@ {sql| + DELETE FROM messages WHERE inbox_level < ? + |sql} + end + + let store ?conn store ~level payload_hashes_hash messages = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.insert (payload_hashes_hash, level, messages) + + let find ?conn store hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select hash + + let delete_before ?conn store ~level = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.delete_before level +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index fa9c1582be31..fc1152c2d262 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -109,3 +109,23 @@ module Inboxes : sig val find_by_block_hash : ?conn:Sqlite.conn -> _ t -> Block_hash.t -> Inbox.t option tzresult Lwt.t end + +(** Storage for persisting messages downloaded from the L1 node. *) +module Messages : sig + (** [store ?conn s ~level hash messages] stores the [messages] who's payload + hash is [hash] that appeared in inbox level [~level]. *) + val store : + ?conn:Sqlite.conn -> + rw -> + level:int32 -> + Merkelized_payload_hashes_hash.t -> + string list -> + unit tzresult Lwt.t + + (** Retrieve messages by their payload hash. *) + val find : + ?conn:Sqlite.conn -> + _ t -> + Merkelized_payload_hashes_hash.t -> + (int32 * string list) option tzresult Lwt.t +end -- GitLab From bf394b8445d402ff83f28282d307f053cb501e10 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 14:57:46 +0200 Subject: [PATCH 09/21] Rollup node store: Outbox messages SQLite store --- src/lib_base/bitset.ml | 5 ++ src/lib_base/bitset.mli | 4 ++ src/lib_smart_rollup_node/sql_store.ml | 87 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 31 +++++++++ 4 files changed, 127 insertions(+) diff --git a/src/lib_base/bitset.ml b/src/lib_base/bitset.ml index a6894f9c1be2..1efff8a62a98 100644 --- a/src/lib_base/bitset.ml +++ b/src/lib_base/bitset.ml @@ -72,3 +72,8 @@ let cardinal = Z.popcount let to_z z = z + +let from_z z = + if Z.sign z < 0 then + error_with "Bitset.from_z: argument %a is negative" Z.pp_print z + else Ok z diff --git a/src/lib_base/bitset.mli b/src/lib_base/bitset.mli index 4bf58c716b76..3ba9fb041033 100644 --- a/src/lib_base/bitset.mli +++ b/src/lib_base/bitset.mli @@ -74,3 +74,7 @@ val cardinal : t -> int (** [to_z t] Returns the sum of powers of two of the given bitset. *) val to_z : t -> Z.t + +(** [from_z i] builds a bitset from its integer representation (partial + function). 
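+    It returns an error if [i] is negative.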
*) +val from_z : Z.t -> t tzresult diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index b8f330c6e4e6..0e13a0ee4493 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -103,6 +103,15 @@ module Types = struct ~decode:State_hash.of_b58check string + let z = + custom + ~encode:(fun i -> Ok (Z.to_string i)) + ~decode:(fun s -> + try Ok (Z.of_string s) with _ -> Error "Invalid Z.t value in database") + string + + let bitset = tzcustom ~encode:Bitset.to_z ~decode:Bitset.from_z z + let history_proof = from_encoding ~name:"Inbox.history_proof" Inbox.history_proof_encoding @@ -482,3 +491,81 @@ module Messages = struct with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.delete_before level end + +module Outbox_messages = struct + type messages_per_level = {messages : Bitset.t; executed_messages : Bitset.t} + + module Q = struct + open Types + + let messages_per_level = + product (fun messages executed_messages -> {messages; executed_messages}) + @@ proj bitset (fun m -> m.messages) + @@ proj bitset (fun m -> m.executed_messages) + @@ proj_end + + let register = + (t2 level bitset ->. unit) + @@ {sql| + INSERT INTO outbox_messages + (outbox_level, messages, executed_messages) + VALUES ($1, $2, 0) + ON CONFLICT DO UPDATE SET messages = $2 + |sql} + + let range = + (t2 level level ->* t2 level messages_per_level) + @@ {sql| + SELECT outbox_level, messages, executed_messages + FROM outbox_messages + WHERE outbox_level BETWEEN ? AND ? + ORDER BY outbox_level DESC + |sql} + + let select_executed = + (level ->? bitset) + @@ {sql| + SELECT executed_messages + FROM outbox_messages + WHERE outbox_level = ? + |sql} + + let update_executed = + (t2 level bitset ->. unit) + @@ {sql| + UPDATE outbox_messages + SET executed_messages = $2 + WHERE outbox_level = $1 + |sql} + end + + let pending ?conn store ~min_level ~max_level = + with_connection store conn @@ fun conn -> + Sqlite.Db.fold + conn + Q.range + (fun (outbox_level, {messages; executed_messages}) acc -> + let pending_at_level = Bitset.diff messages executed_messages in + let l = Bitset.to_list pending_at_level in + if List.is_empty l then acc else (outbox_level, l) :: acc) + (min_level, max_level) + [] + + let register_outbox_messages ?conn store ~outbox_level ~indexes = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.register (outbox_level, indexes) + + let set_outbox_message_executed ?conn store ~outbox_level ~index = + let open Lwt_result_syntax in + with_connection store conn @@ fun conn -> + let* executed_messages = + Sqlite.Db.find_opt conn Q.select_executed outbox_level + in + match executed_messages with + | None -> + (* Not tracking *) + return_unit + | Some executed_messages -> + let*? executed_messages = Bitset.add executed_messages index in + Sqlite.Db.exec conn Q.update_executed (outbox_level, executed_messages) +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index fc1152c2d262..27a431f3f4b9 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -129,3 +129,34 @@ module Messages : sig Merkelized_payload_hashes_hash.t -> (int32 * string list) option tzresult Lwt.t end + +(** Storage for persisting outbox messages. *) +module Outbox_messages : sig + (** [pending ?conn s ~min_level ~max_level] returns all pending (i.e. non + executed) outbox messages between outbox levels [min_level] and + [max_level]. 
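+      Both bounds are inclusive.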
The result is given as a list of pairs whose first component + is the outbox level and the second is the message indexes list. *) + val pending : + ?conn:Sqlite.conn -> + _ t -> + min_level:int32 -> + max_level:int32 -> + (int32 * int list) list tzresult Lwt.t + + (** Register outbox messages for a given outbox level by its indexes. *) + val register_outbox_messages : + ?conn:Sqlite.conn -> + rw -> + outbox_level:int32 -> + indexes:Bitset.t -> + unit tzresult Lwt.t + + (** Register an outbox message as executed by its outbox level and its index + in the outbox. *) + val set_outbox_message_executed : + ?conn:Sqlite.conn -> + rw -> + outbox_level:int32 -> + index:int -> + unit tzresult Lwt.t +end -- GitLab From 7082b2901e1777fa6bc7ffe5ca0c7fe56eafef40 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 14:59:32 +0200 Subject: [PATCH 10/21] Rollup node store: Protocols SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 83 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 38 +++++++++++ 2 files changed, 121 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index 0e13a0ee4493..8bde04b1597d 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -103,6 +103,12 @@ module Types = struct ~decode:State_hash.of_b58check string + let protocol_hash = + tzcustom + ~encode:Protocol_hash.to_b58check + ~decode:Protocol_hash.of_b58check + string + let z = custom ~encode:(fun i -> Ok (Z.to_string i)) @@ -569,3 +575,80 @@ module Outbox_messages = struct let*? executed_messages = Bitset.add executed_messages index in Sqlite.Db.exec conn Q.update_executed (outbox_level, executed_messages) end + +module Protocols = struct + type level = First_known of int32 | Activation_level of int32 + + type proto_info = { + level : level; + proto_level : int; + protocol : Protocol_hash.t; + } + + module Q = struct + open Types + + let proto_info = + product (fun protocol proto_level first_level first_is_activation -> + let level = + if first_is_activation then Activation_level first_level + else First_known first_level + in + {level; proto_level; protocol}) + @@ proj protocol_hash (fun p -> p.protocol) + @@ proj int (fun p -> p.proto_level) + @@ proj level (fun {level = First_known l | Activation_level l; _} -> l) + @@ proj bool (function + | {level = First_known _; _} -> false + | {level = Activation_level _; _} -> true) + @@ proj_end + + let insert = + (proto_info ->. unit) + @@ {sql| + REPLACE INTO protocols + (hash, proto_level, first_level, first_is_activation) + VALUES (?, ?, ?, ?) + |sql} + + let select = + (protocol_hash ->? proto_info) + @@ {sql| + SELECT hash, proto_level, first_level, first_is_activation + FROM protocols + WHERE hash = ? + |sql} + + let proto_of_level = + (level ->? proto_info) + @@ {sql| + SELECT hash, proto_level, first_level, first_is_activation + FROM protocols + WHERE ($1 >= first_level AND first_is_activation = false) + OR ($1 > first_level AND first_is_activation = true) + ORDER BY first_level DESC LIMIT 1 + |sql} + + let last = + (unit ->? 
proto_info) + @@ {sql| + SELECT hash, proto_level, first_level, first_is_activation + FROM protocols + ORDER BY first_level DESC LIMIT 1 + |sql} + end + + let store ?conn store proto = + with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.insert proto + + let find ?conn store inbox_hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select inbox_hash + + let proto_of_level ?conn store level = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.proto_of_level level + + let last ?conn store = + with_connection store conn @@ fun conn -> Sqlite.Db.find_opt conn Q.last () +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index 27a431f3f4b9..1f9802619928 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -160,3 +160,41 @@ module Outbox_messages : sig index:int -> unit tzresult Lwt.t end + +(** Storage for protocol activation levels. *) +module Protocols : sig + type level = First_known of int32 | Activation_level of int32 + + (** Each element of this type represents information we have about a Tezos + protocol regarding its activation. *) + type proto_info = { + level : level; + (** The level at which we have seen the protocol for the first time, + either because we saw its activation or because the first block we + saw (at the origination of the rollup) was from this protocol. *) + proto_level : int; + (** The protocol level, i.e. its number in the sequence of protocol + activations on the chain. *) + protocol : Protocol_hash.t; (** The protocol this information concerns. *) + } + + (** Store a new protocol with its activation level. NOTE: if the protocol hash + is already registered, it will be overwritten. *) + val store : ?conn:Sqlite.conn -> rw -> proto_info -> unit tzresult Lwt.t + + (** Retrieve protocol information by protocol hash. *) + val find : + ?conn:Sqlite.conn -> + _ t -> + Protocol_hash.t -> + proto_info option tzresult Lwt.t + + (** [proto_of_level ?conn s level] returns the protocol in which [level] + appears. It returns [None] if [level] is before the activation of the + first known protocol. *) + val proto_of_level : + ?conn:Sqlite.conn -> _ t -> int32 -> proto_info option tzresult Lwt.t + + (** Returns the last protocol by activation level. 
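+      This is the protocol whose [first_level] is the greatest in the store.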
*) + val last : ?conn:Sqlite.conn -> _ t -> proto_info option tzresult Lwt.t +end -- GitLab From 12a3f26c9646db498e4dda4fb2debec02f10cd63 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 15:07:40 +0200 Subject: [PATCH 11/21] Rollup node store: DAL slot headers SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 73 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 37 +++++++++++++ 2 files changed, 110 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index 8bde04b1597d..7c26d83eafcf 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -109,6 +109,14 @@ module Types = struct ~decode:Protocol_hash.of_b58check string + let dal_commitment = + tzcustom + ~encode:Dal.Commitment.to_b58check + ~decode:Dal.Commitment.of_b58check + string + + let dal_slot_index : Dal.Slot_index.t Caqti_type.t = int16 + let z = custom ~encode:(fun i -> Ok (Z.to_string i)) @@ -652,3 +660,68 @@ module Protocols = struct let last ?conn store = with_connection store conn @@ fun conn -> Sqlite.Db.find_opt conn Q.last () end + +module Dal_slots_headers = struct + module Q = struct + open Types + + let slot_header = + let open Dal.Slot_header in + product (fun index published_level commitment -> + {id = {published_level; index}; commitment}) + @@ proj dal_slot_index (fun h -> h.id.index) + @@ proj level (fun h -> h.id.published_level) + @@ proj dal_commitment (fun h -> h.commitment) + @@ proj_end + + let insert = + (t2 block_hash slot_header ->. unit) + @@ {sql| + REPLACE INTO dal_slots_headers + (block_hash, slot_index, published_level, slot_commitment) + VALUES (?, ?, ?, ?) + |sql} + + let find_slot_header = + (t2 block_hash dal_slot_index ->? slot_header) + @@ {sql| + SELECT slot_index, published_level, slot_commitment + FROM dal_slots_headers + WHERE block_hash = ? AND slot_index = ? + |sql} + + let select_slot_headers = + (block_hash ->* slot_header) + @@ {sql| + SELECT slot_index, published_level, slot_commitment + FROM dal_slots_headers + WHERE block_hash = ? + ORDER BY slot_index DESC + |sql} + + let select_slot_indexes = + (block_hash ->* dal_slot_index) + @@ {sql| + SELECT slot_index + FROM dal_slots_headers + WHERE block_hash = ? + ORDER BY slot_index DESC + |sql} + end + + let store ?conn store block slot_header = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.insert (block, slot_header) + + let find_slot_header ?conn store block ~slot_index = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.find_slot_header (block, slot_index) + + let list_slot_headers ?conn store block = + with_connection store conn @@ fun conn -> + Sqlite.Db.rev_collect_list conn Q.select_slot_headers block + + let list_slot_indexes ?conn store block = + with_connection store conn @@ fun conn -> + Sqlite.Db.rev_collect_list conn Q.select_slot_indexes block +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index 1f9802619928..f7da6288c5f8 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -198,3 +198,40 @@ module Protocols : sig (** Returns the last protocol by activation level. *) val last : ?conn:Sqlite.conn -> _ t -> proto_info option tzresult Lwt.t end + +(** Published slot headers per block hash, stored as a list of bindings from + [Dal_slot_index.t] to [Dal.Slot.t]. The encoding function converts this list + into a [Dal.Slot_index.t]-indexed map. 
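+    In this SQLite store, each binding is a row of the [dal_slots_headers]
+    table, keyed by [(block_hash, slot_index)].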
*) +module Dal_slots_headers : sig + (** [store ?conn s block slot_header] stores the slot header [slot_header] as + being published in the L1 block whose hash is [block]. *) + val store : + ?conn:Sqlite.conn -> + rw -> + Block_hash.t -> + Dal.Slot_header.t -> + unit tzresult Lwt.t + + (** Retrieve a published slot header by its publication block and slot + index. *) + val find_slot_header : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + slot_index:Dal.Slot_index.t -> + Dal.Slot_header.t option tzresult Lwt.t + + (** List all slot headers published in a block. *) + val list_slot_headers : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + Dal.Slot_header.t list tzresult Lwt.t + + (** List all indexes of slot headers published in a block. *) + val list_slot_indexes : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + Dal.Slot_index.t list tzresult Lwt.t +end -- GitLab From e44e39568b2ccb7c85c2fe12fc852c16c6610994 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 15:09:06 +0200 Subject: [PATCH 12/21] Rollup node store: DAL slot statuses SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 49 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 33 +++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index 7c26d83eafcf..6dadf1a496d2 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -117,6 +117,12 @@ module Types = struct let dal_slot_index : Dal.Slot_index.t Caqti_type.t = int16 + let dal_slot_status = + custom + ~encode:(function `Confirmed -> Ok true | `Unconfirmed -> Ok false) + ~decode:(function true -> Ok `Confirmed | false -> Ok `Unconfirmed) + bool + let z = custom ~encode:(fun i -> Ok (Z.to_string i)) @@ -725,3 +731,46 @@ module Dal_slots_headers = struct with_connection store conn @@ fun conn -> Sqlite.Db.rev_collect_list conn Q.select_slot_indexes block end + +module Dal_slots_statuses = struct + module Q = struct + open Types + + let insert = + (t3 block_hash dal_slot_index dal_slot_status ->. unit) + @@ {sql| + REPLACE INTO dal_slots_statuses + (block_hash, slot_index, attested) + VALUES (?, ?, ?) + |sql} + + let find_slot_status = + (t2 block_hash dal_slot_index ->? dal_slot_status) + @@ {sql| + SELECT attested + FROM dal_slots_statuses + WHERE block_hash = ? AND slot_index = ? + |sql} + + let select_slot_statuses = + (block_hash ->* t2 dal_slot_index dal_slot_status) + @@ {sql| + SELECT slot_index, attested + FROM dal_slots_statuses + WHERE block_hash = ? + ORDER BY slot_index DESC + |sql} + end + + let store ?conn store block slot_index slot_status = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.insert (block, slot_index, slot_status) + + let find_slot_status ?conn store block ~slot_index = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.find_slot_status (block, slot_index) + + let list_slot_statuses ?conn store block = + with_connection store conn @@ fun conn -> + Sqlite.Db.rev_collect_list conn Q.select_slot_statuses block +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index f7da6288c5f8..90fcb92ce250 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -235,3 +235,36 @@ module Dal_slots_headers : sig Block_hash.t -> Dal.Slot_index.t list tzresult Lwt.t end + +(** [Dal_slots_statuses] is used to store the attestation status of DAL + slots. 
The values of this storage module have type `[`Confirmed | + `Unconfirmed]`, depending on whether the content of the slot has been + attested on L1 or not. If an entry is not present for a [(block_hash, + slot_index)], this means that the corresponding block is not processed yet. +*) +module Dal_slots_statuses : sig + (** [store ?conn s block slot_index status] store the attestation status of + slot published in [block] and index [slot_index]. *) + val store : + ?conn:Sqlite.conn -> + rw -> + Block_hash.t -> + Dal.Slot_index.t -> + [`Confirmed | `Unconfirmed] -> + unit tzresult Lwt.t + + (** Retrieve the attestation status of a DAL slot. *) + val find_slot_status : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + slot_index:Dal.Slot_index.t -> + [`Confirmed | `Unconfirmed] option tzresult Lwt.t + + (** List attestation statuses of all slots published in a given L1 block. *) + val list_slot_statuses : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + (Dal.Slot_index.t * [`Confirmed | `Unconfirmed]) list tzresult Lwt.t +end -- GitLab From df1fba88ec969282d28caa010a75ed6322d0f1f0 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 15:10:14 +0200 Subject: [PATCH 13/21] Rollup node store: L2 levels SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 46 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 12 +++++++ 2 files changed, 58 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index 6dadf1a496d2..756ba7216f47 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -79,6 +79,12 @@ module Types = struct |> Result.map_error (fun err -> Format.asprintf "%a" pp_print_trace err)) t + let block_hash = + tzcustom + ~encode:Block_hash.to_b58check + ~decode:Block_hash.of_b58check + string + let inbox_hash = tzcustom ~encode:Inbox_hash.to_b58check @@ -774,3 +780,43 @@ module Dal_slots_statuses = struct with_connection store conn @@ fun conn -> Sqlite.Db.rev_collect_list conn Q.select_slot_statuses block end + +module L2_levels = struct + module Q = struct + open Types + + let insert = + (t2 level block_hash ->. unit) + @@ {sql| + REPLACE INTO l2_levels + (level, block_hash) + VALUES (?, ?) + |sql} + + let select = + (level ->? block_hash) + @@ {sql| + SELECT block_hash + FROM l2_levels + WHERE level = ? + |sql} + + let delete_before = + (level ->. unit) + @@ {sql| + DELETE FROM l2_levels WHERE level < ? + |sql} + end + + let store ?conn store level block = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.insert (level, block) + + let find ?conn store level = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select level + + let delete_before ?conn store ~level = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.delete_before level +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index 90fcb92ce250..4d05034a44e2 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -268,3 +268,15 @@ module Dal_slots_statuses : sig Block_hash.t -> (Dal.Slot_index.t * [`Confirmed | `Unconfirmed]) list tzresult Lwt.t end + +(** Storage for associating levels to block hashes *) +module L2_levels : sig + (** [store ?conn s level block] associates the block hash [block] to + [level]. 
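+      Any previous binding for [level] is overwritten.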
*) + val store : + ?conn:Sqlite.conn -> rw -> int32 -> Block_hash.t -> unit tzresult Lwt.t + + (** Retrieve the block hash associated to a given level. *) + val find : + ?conn:Sqlite.conn -> _ t -> int32 -> Block_hash.t option tzresult Lwt.t +end -- GitLab From 00f8faf4f65497fa996d55ab6aa94446849d1b6e Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 15:11:09 +0200 Subject: [PATCH 14/21] Rollup node store: L2 blocks SQLite store --- src/lib_smart_rollup_node/sql_store.ml | 219 ++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 52 ++++++ 2 files changed, 271 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index 756ba7216f47..28cbedfc5a1f 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -97,6 +97,12 @@ module Types = struct ~decode:Merkelized_payload_hashes_hash.of_b58check string + let context_hash = + tzcustom + ~encode:Smart_rollup_context_hash.to_b58check + ~decode:Smart_rollup_context_hash.of_b58check + string + let commitment_hash = tzcustom ~encode:Commitment.Hash.to_b58check @@ -820,3 +826,216 @@ module L2_levels = struct with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.delete_before level end + +module L2_blocks = struct + module Q = struct + open Types + + let l2_block = + let open Sc_rollup_block in + product + (fun + block_hash + level + predecessor + commitment_hash + previous_commitment_hash + context + inbox_witness + inbox_hash + initial_tick + num_ticks + -> + { + header = + { + block_hash; + level; + predecessor; + commitment_hash; + previous_commitment_hash; + context; + inbox_witness; + inbox_hash; + }; + content = (); + initial_tick; + num_ticks; + }) + @@ proj block_hash (fun b -> b.header.block_hash) + @@ proj level (fun b -> b.header.level) + @@ proj block_hash (fun b -> b.header.predecessor) + @@ proj (option commitment_hash) (fun b -> b.header.commitment_hash) + @@ proj commitment_hash (fun b -> b.header.previous_commitment_hash) + @@ proj context_hash (fun b -> b.header.context) + @@ proj payload_hashes_hash (fun b -> b.header.inbox_witness) + @@ proj inbox_hash (fun b -> b.header.inbox_hash) + @@ proj z (fun b -> b.initial_tick) + @@ proj int64 (fun b -> b.num_ticks) + @@ proj_end + + let insert = + (l2_block ->. unit) + @@ {sql| + REPLACE INTO l2_blocks + (block_hash, level, predecessor, commitment_hash, + previous_commitment_hash, context, inbox_witness, + inbox_hash, initial_tick, num_ticks) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + |sql} + + let select = + (block_hash ->? l2_block) + @@ {sql| + SELECT + block_hash, level, predecessor, commitment_hash, + previous_commitment_hash, context, inbox_witness, + inbox_hash, initial_tick, num_ticks + FROM l2_blocks + WHERE block_hash = ? + |sql} + + let select_by_level = + (level ->? l2_block) + @@ {sql| + SELECT + block_hash, level, predecessor, commitment_hash, + previous_commitment_hash, context, inbox_witness, + inbox_hash, initial_tick, num_ticks + FROM l2_blocks + WHERE level = ? + |sql} + + let select_level = + (block_hash ->? level) + @@ {sql| + SELECT level + FROM l2_blocks + WHERE block_hash = ? + |sql} + + let select_context = + (block_hash ->? context_hash) + @@ {sql| + SELECT context + FROM l2_blocks + WHERE block_hash = ? + |sql} + + let select_head = + (unit ->? 
l2_block) + @@ {sql| + SELECT + b.block_hash, b.level, b.predecessor, b.commitment_hash, + b.previous_commitment_hash, b.context, b.inbox_witness, + b.inbox_hash, b.initial_tick, b.num_ticks + FROM l2_blocks as b + INNER JOIN rollup_node_state as s + ON s.name = "l2_head" AND s.value = b.block_hash + |sql} + + let select_finalized = + (unit ->? l2_block) + @@ {sql| + SELECT + b.block_hash, b.level, b.predecessor, b.commitment_hash, + b.previous_commitment_hash, b.context, b.inbox_witness, + b.inbox_hash, b.initial_tick, b.num_ticks + FROM l2_blocks AS b + INNER JOIN rollup_node_state as s + ON s.name = "finalized_level" AND s.level = b.level + |sql} + + let select_level_and_predecessor = + (block_hash ->? t2 level block_hash) + @@ {sql| + SELECT level, predecessor + FROM l2_blocks + WHERE block_hash = ? + |sql} + + let select_full = + (block_hash ->? t4 l2_block (option commitment) inbox messages_list) + @@ {sql| + SELECT + b.block_hash, b.level, b.predecessor, b.commitment_hash, + b.previous_commitment_hash, b.context, b.inbox_witness, + b.inbox_hash, b.initial_tick, b.num_ticks, + c.compressed_state, c.inbox_level, c.predecessor, c.number_of_ticks, + i.inbox_level, i.history_proof, + m.message_list + FROM l2_blocks AS b + LEFT JOIN commitments as c + ON c.hash = b.commitment_hash + INNER JOIN inboxes as i + ON i.hash = b.inbox_hash + INNER JOIN messages as m + ON m.payload_hashes_hash = b.inbox_witness + WHERE block_hash = ? + |sql} + + let delete_before = + (level ->. unit) + @@ {sql| + DELETE FROM l2_blocks WHERE level < ? + |sql} + end + + let store ?conn store l2_block = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.insert l2_block + + let find ?conn store block_hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select block_hash + + let find_by_level ?conn store level = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select_by_level level + + let find_level ?conn store block_hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select_level block_hash + + let find_context ?conn store block_hash = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select_context block_hash + + let find_head ?conn store = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select_head () + + let find_finalized ?conn store = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select_finalized () + + let find_predecessor ?conn store block_hash = + let open Lwt_result_syntax in + let+ level_pred = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select_level_and_predecessor block_hash + in + Option.map + (fun (level, predecessor) -> (predecessor, Int32.pred level)) + level_pred + + let find_full ?conn store block_hash = + let open Lwt_result_syntax in + let+ res = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.select_full block_hash + in + match res with + | None -> None + | Some (l2_block, commitment, inbox, messages) -> + Some + Sc_rollup_block. 
+ { + l2_block with + content = {inbox; messages; commitment; outbox = None}; + } + + let delete_before ?conn store ~level = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.delete_before level +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index 4d05034a44e2..b0a25a315d1a 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -280,3 +280,55 @@ module L2_levels : sig val find : ?conn:Sqlite.conn -> _ t -> int32 -> Block_hash.t option tzresult Lwt.t end + +(** Storage for L2 blocks contracted by the rollup node. *) +module L2_blocks : sig + (** Store an L2 block. *) + val store : + ?conn:Sqlite.conn -> rw -> Sc_rollup_block.t -> unit tzresult Lwt.t + + (** Retrieve an L2 block by the L1 block hash. *) + val find : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + Sc_rollup_block.t option tzresult Lwt.t + + (** Retrieve an L2 block by its level. *) + val find_by_level : + ?conn:Sqlite.conn -> _ t -> int32 -> Sc_rollup_block.t option tzresult Lwt.t + + (** Retrieve the level of an L2 block with its hash. *) + val find_level : + ?conn:Sqlite.conn -> _ t -> Block_hash.t -> int32 option tzresult Lwt.t + + (** Retrieve the context hash for an L2 block with its hash. *) + val find_context : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + Smart_rollup_context_hash.t option tzresult Lwt.t + + (** Retrieve the current head of the L2 chain. *) + val find_head : + ?conn:Sqlite.conn -> _ t -> Sc_rollup_block.t option tzresult Lwt.t + + (** Retrieve the currently last finalized block of the L2 chain. *) + val find_finalized : + ?conn:Sqlite.conn -> _ t -> Sc_rollup_block.t option tzresult Lwt.t + + (** Returns the predecessor, and its level, of an L2 block. *) + val find_predecessor : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + (Block_hash.t * int32) option tzresult Lwt.t + + (** Returns a full L2 block (i.e. with all information) with a single database + query. NOTE: The result does not contain the outbox. *) + val find_full : + ?conn:Sqlite.conn -> + _ t -> + Block_hash.t -> + Sc_rollup_block.full option tzresult Lwt.t +end -- GitLab From c35c5ee9e3582bcfa2705ddfcb049d1788dbbee0 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 15:12:59 +0200 Subject: [PATCH 15/21] Rollup node store: SQLite store for values of the rollup node state This table contains single values that are updated by the rollup node to keep persistent information about its state. --- src/lib_smart_rollup_node/sql_store.ml | 173 ++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 31 +++++ 2 files changed, 204 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index 28cbedfc5a1f..d4e29ff5eac0 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -166,6 +166,12 @@ module Types = struct @@ proj level (fun i -> i.Inbox.level) @@ proj history_proof (fun i -> i.Inbox.old_levels_messages) @@ proj_end + + let history_mode = + custom + ~encode:(fun h -> Ok (Configuration.string_of_history_mode h)) + ~decode:(fun s -> Ok (Configuration.history_mode_of_string s)) + string end let table_exists_req = @@ -1039,3 +1045,170 @@ module L2_blocks = struct with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.delete_before level end + +module State = struct + module Q (N : sig + val name : string + end) = + struct + open Types + + let set_level = + (level ->. 
unit) + @@ Format.sprintf + {sql|REPLACE INTO rollup_node_state (name, level) VALUES (%S, ?)|sql} + N.name + + let get_level = + (unit ->? level) + @@ Format.sprintf + {sql|SELECT level from rollup_node_state WHERE name = %S|sql} + N.name + + let set_value type_ = + (type_ ->. unit) + @@ Format.sprintf + {sql|REPLACE INTO rollup_node_state (name, value) VALUES (%S, ?)|sql} + N.name + + let get_value type_ = + (unit ->? type_) + @@ Format.sprintf + {sql|SELECT value from rollup_node_state WHERE name = %S|sql} + N.name + + let set_both type_ = + (t2 type_ level ->. unit) + @@ Format.sprintf + {sql| + REPLACE INTO rollup_node_state (name, value, level) + VALUES (%S, ?, ?) + |sql} + N.name + + let get_both type_ = + (unit ->? t2 type_ level) + @@ Format.sprintf + {sql|SELECT value, level from rollup_node_state WHERE name = %S|sql} + N.name + end + + module type S = sig + type value + + val set : ?conn:Sqlite.conn -> rw -> value -> unit tzresult Lwt.t + + val get : ?conn:Sqlite.conn -> _ t -> value option tzresult Lwt.t + end + + module Make_level (N : sig + val name : string + end) : S with type value := int32 = struct + module Q = Q (N) + + let set ?conn store level = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn Q.set_level level + + let get ?conn store = + with_connection store conn @@ fun conn -> + Sqlite.Db.find_opt conn Q.get_level () + end + + module Make_value (N : sig + type value + + val name : string + + val type_ : value Caqti_type.t + end) : S with type value := N.value = struct + module Q = Q (N) + + let set = Q.set_value N.type_ + + let set ?conn store value = + with_connection store conn @@ fun conn -> Sqlite.Db.exec conn set value + + let get = Q.get_value N.type_ + + let get ?conn store = + with_connection store conn @@ fun conn -> Sqlite.Db.find_opt conn get () + end + + module Make_both (N : sig + type value + + val name : string + + val type_ : value Caqti_type.t + end) : S with type value := N.value * int32 = struct + module Q = Q (N) + + let set = Q.set_both N.type_ + + let set ?conn store (value, level) = + with_connection store conn @@ fun conn -> + Sqlite.Db.exec conn set (value, level) + + let get = Q.get_both N.type_ + + let get ?conn store = + with_connection store conn @@ fun conn -> Sqlite.Db.find_opt conn get () + end + + module Finalized_level = Make_level (struct + let name = "finalized_level" + end) + + module LCC = Make_both (struct + let name = "lcc" + + type value = Commitment.Hash.t + + let type_ = Types.commitment_hash + end) + + module LPC = Make_both (struct + let name = "lpc" + + type value = Commitment.Hash.t + + let type_ = Types.commitment_hash + end) + + module Last_gc_target = Make_level (struct + let name = "last_gc_target" + end) + + module Last_gc_triggered_at = Make_level (struct + let name = "last_gc_triggered_at" + end) + + module Last_successful_gc_target = Make_level (struct + let name = "last_gc_target" + end) + + module Last_successful_gc_triggered_at = Make_level (struct + let name = "last_gc_triggered_at" + end) + + module Last_context_split = Make_level (struct + let name = "last_context_split" + end) + + module History_mode = Make_value (struct + let name = "history_mode" + + type value = Configuration.history_mode + + let type_ = Types.history_mode + end) + + module L2_head = Make_both (struct + let name = "l2_head" + + type value = Block_hash.t + + let type_ = Types.block_hash + end) +end diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index 
b0a25a315d1a..a55539a1394b 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -332,3 +332,34 @@ module L2_blocks : sig Block_hash.t -> Sc_rollup_block.full option tzresult Lwt.t end + +(** Storage of single values. *) +module State : sig + module type S = sig + type value + + val set : ?conn:Sqlite.conn -> rw -> value -> unit tzresult Lwt.t + + val get : ?conn:Sqlite.conn -> _ t -> value option tzresult Lwt.t + end + + module Finalized_level : S with type value := int32 + + module LCC : S with type value := Commitment.Hash.t * int32 + + module LPC : S with type value := Commitment.Hash.t * int32 + + module Last_gc_target : S with type value := int32 + + module Last_gc_triggered_at : S with type value := int32 + + module Last_successful_gc_target : S with type value := int32 + + module Last_successful_gc_triggered_at : S with type value := int32 + + module Last_context_split : S with type value := int32 + + module History_mode : S with type value := Configuration.history_mode + + module L2_head : S with type value := Block_hash.t * int32 +end -- GitLab From 75901930aff7779b7aa14c3a8b5e7f9087f23c46 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Thu, 26 Sep 2024 15:13:49 +0200 Subject: [PATCH 16/21] Rollup node/store: SQLite store garbage collection --- src/lib_smart_rollup_node/sql_store.ml | 35 +++++++++++++++++++++++++ src/lib_smart_rollup_node/sql_store.mli | 10 +++++++ src/lib_sqlite/sqlite.ml | 5 ++++ src/lib_sqlite/sqlite.mli | 4 +++ 4 files changed, 54 insertions(+) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index d4e29ff5eac0..a18ac337c8e4 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -39,6 +39,22 @@ module Events = struct ~level:Error ("applied", Data_encoding.int31) ("known", Data_encoding.int31) + + let start_gc = + declare_0 + ~section + ~name:"smart_rollup_node_store_gc_start" + ~msg:"Garbage collection started for store" + ~level:Info + () + + let finish_gc = + declare_0 + ~section + ~name:"smart_rollup_node_store_gc_finish" + ~msg:"Garbage collection finished for store" + ~level:Info + () end let with_connection store conn = @@ -1212,3 +1228,22 @@ module State = struct let type_ = Types.block_hash end) end + +let gc store ~level = + let open Lwt_result_syntax in + Sqlite.use store @@ fun conn -> + let*! () = Events.(emit start_gc) () in + let* () = + Sqlite.with_transaction conn @@ fun conn -> + let* () = L2_blocks.delete_before ~conn store ~level in + let* () = Commitments.delete_before ~conn store ~level in + let* () = Inboxes.delete_before ~conn store ~level in + let* () = Messages.delete_before ~conn store ~level in + let* () = + Commitments_published_at_levels.delete_before ~conn store ~level + in + let* () = L2_levels.delete_before ~conn store ~level in + return_unit + in + let*! () = Events.(emit finish_gc) () in + return_unit diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index a55539a1394b..e3872acbf425 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -27,6 +27,16 @@ val close : _ t -> unit Lwt.t (** Returns a read-only version of the store. *) val readonly : _ t -> ro +(** [gc store ~level] garbage collects data that relate to levels below [level] + (by removing information from the database). 
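+
+    As a purely illustrative sketch (assuming a read-write store [store] and a
+    target level [first_kept_level : int32] are available):
+    {[
+      let open Lwt_result_syntax in
+      let* () = gc store ~level:first_kept_level in
+      return_unit
+    ]}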
*) +val gc : rw -> level:int32 -> unit tzresult Lwt.t + +(** [with_transaction store f] executes [f] with a single connection to the + database in a transaction. I.e., if [f] returns an error or raises an + exception, the store will not be modified. *) +val with_transaction : + _ t -> (Sqlite.conn -> 'a tzresult Lwt.t) -> 'a tzresult Lwt.t + (** {2 Modules for storing data in the store} Each module maps to a table in the database. Every read and write function diff --git a/src/lib_sqlite/sqlite.ml b/src/lib_sqlite/sqlite.ml index aab2fc1e43f5..3cf9b0518ec1 100644 --- a/src/lib_sqlite/sqlite.ml +++ b/src/lib_sqlite/sqlite.ml @@ -158,6 +158,8 @@ module Q = struct ~decode:(function "wal" -> Ok Wal | _ -> Ok Other) string + let vacuum_self = (unit ->. unit) @@ {|VACUUM main|} + let vacuum_request = (string ->. unit) @@ {|VACUUM main INTO ?|} module Journal_mode = struct @@ -199,6 +201,9 @@ let vacuum ~conn ~output_db_file = let* () = use (Pool {db_pool}) set_wal_journal_mode in return_unit +let vacuum_self ~conn = + with_connection conn @@ fun conn -> Db.exec conn Q.vacuum_self () + let init ~path ~perm migration_code = let open Lwt_result_syntax in let* db_pool = diff --git a/src/lib_sqlite/sqlite.mli b/src/lib_sqlite/sqlite.mli index a07026611549..8a9d52caed03 100644 --- a/src/lib_sqlite/sqlite.mli +++ b/src/lib_sqlite/sqlite.mli @@ -36,6 +36,10 @@ val close : t -> unit Lwt.t function is useful to backup the database. *) val vacuum : conn:conn -> output_db_file:string -> unit tzresult Lwt.t +(** Vacuums the database itself after removing lot of data + {{:https://www.sqlite.org/lang_vacuum.html}[VACUUM] sqlite command}. *) +val vacuum_self : conn:conn -> unit tzresult Lwt.t + (** {2 Database connections} *) (** [use db k] executes [k] with a fresh connection to [db]. 
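+
+    For instance, a minimal sketch combining [use] with the new [vacuum_self]
+    (assuming [db : t] is an open handle and that the callback passed to [use]
+    returns a [tzresult Lwt.t]):
+    {[
+      let compact db = use db (fun conn -> vacuum_self ~conn)
+    ]}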
*) -- GitLab From da56bc46c721b5a1251b572b2950d2dca2764e63 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Tue, 24 Sep 2024 11:08:37 +0200 Subject: [PATCH 17/21] Rollup node/store: use new SQLite store Also deprecate index-buffer-size CLI option and config field --- .../main_smart_rollup_node.ml | 2 - src/lib_smart_rollup/dal.mli | 12 +- src/lib_smart_rollup_node/cli.ml | 2 +- src/lib_smart_rollup_node/configuration.ml | 2 +- src/lib_smart_rollup_node/interpreter.ml | 4 +- src/lib_smart_rollup_node/messages.ml | 69 --- src/lib_smart_rollup_node/messages.mli | 19 - src/lib_smart_rollup_node/node_context.ml | 522 +++++++----------- src/lib_smart_rollup_node/node_context.mli | 64 +-- .../node_context_loader.ml | 38 +- .../node_context_loader.mli | 1 - src/lib_smart_rollup_node/repair.ml | 2 +- .../rollup_node_daemon.ml | 5 +- src/lib_smart_rollup_node/rpc_directory.ml | 13 +- src/lib_smart_rollup_node/snapshots.ml | 127 ++--- src/lib_smart_rollup_node/store.ml | 2 +- src/lib_smart_rollup_node/store_migration.ml | 2 +- src/lib_smart_rollup_node/store_v5.ml | 10 + src/lib_smart_rollup_node/store_version.ml | 14 +- src/lib_smart_rollup_node/store_version.mli | 2 +- .../test/test_store_gc.ml | 27 +- .../wasm_2_0_0_utilities.ml | 78 +-- .../lib_sc_rollup_node/inbox.ml | 8 +- .../refutation_game_helpers.ml | 2 +- .../lib_sc_rollup_node/inbox.ml | 8 +- .../refutation_game_helpers.ml | 2 +- .../lib_sc_rollup_node/inbox.ml | 8 +- .../refutation_game_helpers.ml | 2 +- .../lib_sc_rollup_node/inbox.ml | 8 +- .../refutation_game_helpers.ml | 2 +- .../lib_sc_rollup_node/inbox.ml | 8 +- .../refutation_game_helpers.ml | 2 +- src/proto_alpha/lib_sc_rollup_node/inbox.ml | 8 +- .../refutation_game_helpers.ml | 2 +- 34 files changed, 380 insertions(+), 697 deletions(-) delete mode 100644 src/lib_smart_rollup_node/messages.ml delete mode 100644 src/lib_smart_rollup_node/messages.mli create mode 100644 src/lib_smart_rollup_node/store_v5.ml diff --git a/src/bin_smart_rollup_node/main_smart_rollup_node.ml b/src/bin_smart_rollup_node/main_smart_rollup_node.ml index 200e34946043..2deef106c157 100644 --- a/src/bin_smart_rollup_node/main_smart_rollup_node.ml +++ b/src/bin_smart_rollup_node/main_smart_rollup_node.ml @@ -249,7 +249,6 @@ let legacy_run_command = Rollup_node_daemon.run ~data_dir ~irmin_cache_size:Configuration.default_irmin_cache_size - ~index_buffer_size:Configuration.default_index_buffer_size ?log_kernel_debug_file configuration cctxt) @@ -363,7 +362,6 @@ let run_command = Rollup_node_daemon.run ~data_dir ~irmin_cache_size:Configuration.default_irmin_cache_size - ~index_buffer_size:Configuration.default_index_buffer_size ?log_kernel_debug_file configuration cctxt) diff --git a/src/lib_smart_rollup/dal.mli b/src/lib_smart_rollup/dal.mli index 49df5e9c4677..30da13637509 100644 --- a/src/lib_smart_rollup/dal.mli +++ b/src/lib_smart_rollup/dal.mli @@ -45,11 +45,15 @@ module Commitment : sig val encoding : t Data_encoding.t - val pp : - Format.formatter -> Tezos_crypto_dal.Cryptobox.Verifier.commitment -> unit + val pp : Format.formatter -> t -> unit - val pp_short : - Format.formatter -> Tezos_crypto_dal.Cryptobox.Verifier.commitment -> unit + val pp_short : Format.formatter -> t -> unit + + val to_b58check : t -> string + + val of_b58check_opt : string -> t option + + val of_b58check : string -> t Error_monad.tzresult end (** A slot commitment proof, same as protocol slot commitments' proofs through diff --git a/src/lib_smart_rollup_node/cli.ml b/src/lib_smart_rollup_node/cli.ml index 
539b3e374ea7..15964db15da0 100644 --- a/src/lib_smart_rollup_node/cli.ml +++ b/src/lib_smart_rollup_node/cli.ml @@ -385,7 +385,7 @@ let index_buffer_size_arg = ~placeholder:"" ~doc: "The maximum cache size in memory before it is flushed to disk, used for \ - indexes of the store." + indexes of the store. (Deprecated)" positive_int_parameter let irmin_cache_size_arg = diff --git a/src/lib_smart_rollup_node/configuration.ml b/src/lib_smart_rollup_node/configuration.ml index f4e905d75405..e3c9e5824453 100644 --- a/src/lib_smart_rollup_node/configuration.ml +++ b/src/lib_smart_rollup_node/configuration.ml @@ -726,7 +726,7 @@ let encoding default_display : t Data_encoding.t = "loop_retry_delay" Data_encoding.float default_loop_retry_delay) - (opt "index_buffer_size" int31) + (opt "index_buffer_size" int31 ~description:"Deprecated") (opt "irmin_cache_size" int31) (dft "log-kernel-debug" Data_encoding.bool false) (dft "no-degraded" Data_encoding.bool false) diff --git a/src/lib_smart_rollup_node/interpreter.ml b/src/lib_smart_rollup_node/interpreter.ml index e61b8f6450ec..a749ebb3e3b9 100644 --- a/src/lib_smart_rollup_node/interpreter.ml +++ b/src/lib_smart_rollup_node/interpreter.ml @@ -186,7 +186,9 @@ let start_state_of_block plugin node_ctxt (block : Sc_rollup_block.t) = let module Plugin = (val plugin) in let*! tick = Plugin.Pvm.get_tick node_ctxt.kind state in let*! state_hash = Plugin.Pvm.state_hash node_ctxt.kind state in - let* messages = Messages.get node_ctxt block.header.inbox_witness in + let* messages = + Node_context.get_messages node_ctxt block.header.inbox_witness + in return Pvm_plugin_sig. { diff --git a/src/lib_smart_rollup_node/messages.ml b/src/lib_smart_rollup_node/messages.ml deleted file mode 100644 index af4cd01496ce..000000000000 --- a/src/lib_smart_rollup_node/messages.ml +++ /dev/null @@ -1,69 +0,0 @@ -(*****************************************************************************) -(* *) -(* SPDX-License-Identifier: MIT *) -(* Copyright (c) 2023 Functori *) -(* *) -(*****************************************************************************) - -let add_all_messages (node_ctxt : _ Node_context.t) ~messages ~pred_hash = - let open Lwt_result_syntax in - let* pred_header = Node_context.header_of_hash node_ctxt pred_hash in - let* grand_parent_header = - Node_context.header_of_hash node_ctxt pred_header.header.predecessor - in - let is_first_block = - pred_header.header.proto_level <> grand_parent_header.header.proto_level - in - let inbox_level = Int32.succ pred_header.level in - let* plugin = Protocol_plugins.proto_plugin_for_level node_ctxt inbox_level in - let module Plugin = (val plugin) in - return - @@ Plugin.Pvm.start_of_level_serialized - :: - (if is_first_block then - Option.to_list Plugin.Pvm.protocol_migration_serialized - else []) - @ Plugin.Pvm.info_per_level_serialized - ~predecessor:pred_header.hash - ~predecessor_timestamp:pred_header.header.timestamp - :: messages - @ [Plugin.Pvm.end_of_level_serialized] - -(** Returns [true] if the first messages of the parameter list is an encoded - [Start_of_level] message. 
*) -let has_sol = function - | "\x00\x01" :: _ -> - (* 00 is for Internal_message and 01 is for Start_of_level *) - true - | _ -> false - -let find node_ctxt messages_hash = - let open Lwt_result_syntax in - let* msg = Node_context.unsafe_find_stored_messages node_ctxt messages_hash in - match msg with - | None -> return_none - | Some (messages, pred_hash) -> - if has_sol messages then return_some messages - else - (* The messages do not contain the internal protocol messages, we add - them back. NOTE: this requires to potentially make L1 rpc calls. *) - let* messages = add_all_messages node_ctxt ~messages ~pred_hash in - let* () = - Node_context.save_messages - node_ctxt - messages_hash - ~predecessor:pred_hash - messages - in - return_some messages - -let get node_ctxt messages_hash = - let open Lwt_result_syntax in - let* res = find node_ctxt messages_hash in - match res with - | None -> - failwith - "Could not retrieve messages with payloads merkelized hash %a" - Merkelized_payload_hashes_hash.pp - messages_hash - | Some res -> return res diff --git a/src/lib_smart_rollup_node/messages.mli b/src/lib_smart_rollup_node/messages.mli deleted file mode 100644 index 973951d00c2c..000000000000 --- a/src/lib_smart_rollup_node/messages.mli +++ /dev/null @@ -1,19 +0,0 @@ -(*****************************************************************************) -(* *) -(* SPDX-License-Identifier: MIT *) -(* Copyright (c) 2023 Functori *) -(* *) -(*****************************************************************************) - -(** [get t witness_hash] retrieves the messages for the merkelized - payloads hash [witness_hash] stored by the rollup node. *) -val get : - Node_context.rw -> - Merkelized_payload_hashes_hash.t -> - string list tzresult Lwt.t - -(** Same as {!get} but returns [None] if the payloads hash is not known. 
*) -val find : - Node_context.rw -> - Merkelized_payload_hashes_hash.t -> - string list option tzresult Lwt.t diff --git a/src/lib_smart_rollup_node/node_context.ml b/src/lib_smart_rollup_node/node_context.ml index 6cec084ff32f..7f2853344c63 100644 --- a/src/lib_smart_rollup_node/node_context.ml +++ b/src/lib_smart_rollup_node/node_context.ml @@ -25,39 +25,33 @@ (* *) (*****************************************************************************) -type lcc = Store.Lcc.lcc = {commitment : Commitment.Hash.t; level : int32} +type lcc = {commitment : Commitment.Hash.t; level : int32} type genesis_info = Metadata.genesis_info = { level : int32; commitment_hash : Commitment.Hash.t; } -type 'a store = 'a Store.t +type 'a store = 'a Store.t constraint 'a = [< `Read | `Write > `Read] module Node_store = struct let close (s : 'a store) = Store.close s - let load : - 'a Store_sigs.mode -> - index_buffer_size:int -> - l2_blocks_cache_size:int -> - string -> - 'a store tzresult Lwt.t = - Store.load + let load : 'a Store_sigs.mode -> data_dir:string -> 'a store tzresult Lwt.t = + Store.init let check_and_set_history_mode (type a) (mode : a Store_sigs.mode) - (store : a Store.store) (history_mode : Configuration.history_mode option) - = + (store : a Store.t) (history_mode : Configuration.history_mode option) = let open Lwt_result_syntax in let* stored_history_mode = match mode with - | Read_only -> Store.History_mode.read store.history_mode - | Read_write -> Store.History_mode.read store.history_mode + | Read_only -> Store.State.History_mode.get store + | Read_write -> Store.State.History_mode.get store in let save_when_rw history_mode = match mode with | Read_only -> return_unit - | Read_write -> Store.History_mode.write store.history_mode history_mode + | Read_write -> Store.State.History_mode.set store history_mode in match (stored_history_mode, history_mode) with | None, None -> save_when_rw Configuration.default_history_mode @@ -186,15 +180,13 @@ let make_kernel_logger event ?log_kernel_debug_file logs_dir = let checkout_context node_ctxt block_hash = let open Lwt_result_syntax in - let* l2_header = - Store.L2_blocks.header node_ctxt.store.l2_blocks block_hash - in + let* context_hash = Store.L2_blocks.find_context node_ctxt.store block_hash in let*? context_hash = let open Result_syntax in - match l2_header with + match context_hash with | None -> tzfail (Rollup_node_errors.Cannot_checkout_context (block_hash, None)) - | Some {context; _} -> return context + | Some context -> return context in let*! 
ctxt = Context.checkout node_ctxt.context context_hash in match ctxt with @@ -232,12 +224,12 @@ end let get_history_mode {store; _} = let open Lwt_result_syntax in - let+ mode = Store.History_mode.read store.history_mode in + let+ mode = Store.State.History_mode.get store in Option.value mode ~default:Configuration.default_history_mode let hash_of_level_opt {store; cctxt; _} level = let open Lwt_result_syntax in - let* hash = Store.Levels_to_hashes.find store.levels_to_hashes level in + let* hash = Store.L2_levels.find store level in match hash with | Some hash -> return_some hash | None -> @@ -259,23 +251,18 @@ let hash_of_level node_ctxt level = let level_of_hash {l1_ctxt; store; _} hash = let open Lwt_result_syntax in - let* l2_header = Store.L2_blocks.header store.l2_blocks hash in - match l2_header with - | Some {level; _} -> return level + let* level = Store.L2_blocks.find_level store hash in + match level with + | Some level -> return level | None -> let+ {level; _} = Layer1.fetch_tezos_shell_header l1_ctxt hash in level let save_level {store; _} Layer1.{hash; level} = - Store.Levels_to_hashes.add store.levels_to_hashes level hash + Store.L2_levels.store store level hash let save_l2_block {store; _} (head : Sc_rollup_block.t) = - let head_info = {head with header = (); content = ()} in - Store.L2_blocks.append - store.l2_blocks - ~key:head.header.block_hash - ~header:head.header - ~value:head_info + Store.L2_blocks.store store head let notify_processed_tezos_level node_ctxt level = node_ctxt.sync.processed_level <- level ; @@ -283,35 +270,39 @@ let notify_processed_tezos_level node_ctxt level = let set_l2_head node_ctxt (head : Sc_rollup_block.t) = let open Lwt_result_syntax in - let+ () = Store.L2_head.write node_ctxt.store.l2_head head in + let+ () = + Store.State.L2_head.set + node_ctxt.store + (head.header.block_hash, head.header.level) + in notify_processed_tezos_level node_ctxt head.header.level ; Metrics.wrap (fun () -> Metrics.Inbox.set_head_level head.header.level) ; Lwt_watcher.notify node_ctxt.global_block_watcher head -let is_processed {store; _} head = Store.L2_blocks.mem store.l2_blocks head +let is_processed {store; _} head = + let open Lwt_result_syntax in + let+ level = Store.L2_blocks.find_level store head in + Option.is_some level -let last_processed_head_opt {store; _} = Store.L2_head.read store.l2_head +let last_processed_head_opt {store; _} = Store.L2_blocks.find_head store let mark_finalized_level {store; _} level = - Store.Last_finalized_level.write store.last_finalized_level level + Store.State.Finalized_level.set store level let get_finalized_level {store; _} = let open Lwt_result_syntax in - let+ level = Store.Last_finalized_level.read store.last_finalized_level in + let+ level = Store.State.Finalized_level.get store in Option.value level ~default:0l -let get_l2_block {store; _} block_hash = +let find_l2_block {store; _} block_hash = Store.L2_blocks.find store block_hash + +let get_l2_block node_ctxt block_hash = let open Lwt_result_syntax in - let* block = Store.L2_blocks.read store.l2_blocks block_hash in + let* block = find_l2_block node_ctxt block_hash in match block with | None -> failwith "Could not retrieve L2 block for %a" Block_hash.pp block_hash - | Some (info, header) -> return {info with Sc_rollup_block.header} - -let find_l2_block {store; _} block_hash = - let open Lwt_result_syntax in - let+ block = Store.L2_blocks.read store.l2_blocks block_hash in - Option.map (fun (info, header) -> {info with Sc_rollup_block.header}) block + | Some block 
-> return block let get_l2_block_by_level node_ctxt level = let open Lwt_result_syntax in @@ -326,26 +317,14 @@ let find_l2_block_by_level node_ctxt level = | None -> return_none | Some block_hash -> find_l2_block node_ctxt block_hash -let get_finalized_head_opt node_ctxt = - let open Lwt_result_syntax in - let* level = - Store.Last_finalized_level.read node_ctxt.store.last_finalized_level - in - match level with - | None -> return_none - | Some level -> find_l2_block_by_level node_ctxt level +let get_finalized_head_opt {store; _} = Store.L2_blocks.find_finalized store let head_of_block_level (hash, level) = {Layer1.hash; level} let block_level_of_head Layer1.{hash; level} = (hash, level) -let get_l2_block_predecessor node_ctxt hash = - let open Lwt_result_syntax in - let+ header = Store.L2_blocks.header node_ctxt.store.l2_blocks hash in - Option.map - (fun {Sc_rollup_block.predecessor; level; _} -> - (predecessor, Int32.pred level)) - header +let get_l2_block_predecessor {store; _} hash = + Store.L2_blocks.find_predecessor store hash let get_predecessor_opt node_ctxt (hash, level) = let open Lwt_result_syntax in @@ -424,11 +403,9 @@ let tick_search ~big_step_blocks node_ctxt ?min_level head tick = (* The starting block contains the tick we want, we are done. *) return_some head else - let* gc_levels = Store.Gc_levels.read node_ctxt.store.gc_levels in + let* last_gc_target = Store.State.Last_gc_target.get node_ctxt.store in let first_available_level = - match gc_levels with - | Some {first_available_level = l; _} -> l - | None -> node_ctxt.genesis_info.level + Option.value last_gc_target ~default:node_ctxt.genesis_info.level in let min_level = match min_level with @@ -491,14 +468,14 @@ let tick_search ~big_step_blocks node_ctxt ?min_level head tick = (* Then do dichotomy on interval [start_block; end_block] *) dicho start_block end_block -let block_with_tick ({store; _} as node_ctxt) ?min_level ~max_level tick = +let block_with_tick node_ctxt ?min_level ~max_level tick = let open Lwt_result_syntax in let open Lwt_result_option_syntax in Error.trace_lwt_result_with "Could not retrieve block with tick %a" Z.pp_print tick - @@ let** head = Store.L2_head.read store.l2_head in + @@ let** head = last_processed_head_opt node_ctxt in (* We start by taking big steps of 4096 blocks for the first approximation. This means we need at most 20 big steps to start a dichotomy on a 4096 blocks interval (at most 12 steps). 
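      (Indeed, 4096 = 2^12, so the final dichotomy needs at most 12 steps.)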
We could take @@ -512,9 +489,7 @@ let block_with_tick ({store; _} as node_ctxt) ?min_level ~max_level tick = tick_search ~big_step_blocks:4096 node_ctxt ?min_level head tick let find_commitment {store; _} commitment_hash = - let open Lwt_result_syntax in - let+ commitment = Store.Commitments.read store.commitments commitment_hash in - Option.map fst commitment + Store.Commitments.find store commitment_hash let get_commitment node_ctxt commitment_hash = let open Lwt_result_syntax in @@ -527,16 +502,13 @@ let get_commitment node_ctxt commitment_hash = commitment_hash | Some i -> return i -let commitment_exists {store; _} hash = - Store.Commitments.mem store.commitments hash +let commitment_exists node_ctxt hash = + let open Lwt_result_syntax in + let+ c = find_commitment node_ctxt hash in + Option.is_some c let save_commitment {store; _} commitment = - let open Lwt_result_syntax in - let hash = Commitment.hash commitment in - let+ () = - Store.Commitments.append store.commitments ~key:hash ~value:commitment - in - hash + Store.Commitments.store store commitment let tick_offset_of_commitment_period node_ctxt (commitment : Commitment.t) = let open Lwt_result_syntax in @@ -552,30 +524,19 @@ let tick_offset_of_commitment_period node_ctxt (commitment : Commitment.t) = Z.sub commitment_final_tick (Z.of_int64 commitment.number_of_ticks) let commitment_published_at_level {store; _} commitment = - Store.Commitments_published_at_level.find - store.commitments_published_at_level - commitment + Store.Commitments_published_at_levels.get store commitment -let set_commitment_published_at_level {store; _} hash = - Store.Commitments_published_at_level.add - store.commitments_published_at_level - hash +let set_commitment_published_at_level {store; _} hash levels = + Store.Commitments_published_at_levels.register store hash levels type commitment_source = Anyone | Us -let commitment_was_published {store; _} ~source commitment_hash = +let commitment_was_published node_ctxt ~source commitment_hash = let open Lwt_result_syntax in + let+ info = commitment_published_at_level node_ctxt commitment_hash in match source with - | Anyone -> - Store.Commitments_published_at_level.mem - store.commitments_published_at_level - commitment_hash + | Anyone -> Option.is_some info | Us -> ( - let+ info = - Store.Commitments_published_at_level.find - store.commitments_published_at_level - commitment_hash - in match info with | Some {published_at_level = Some _; _} -> true | _ -> false) @@ -583,7 +544,7 @@ let commitment_was_published {store; _} ~source commitment_hash = let set_lcc node_ctxt lcc = let open Lwt_result_syntax in let lcc_l1 = Reference.get node_ctxt.lcc in - let* () = Store.Lcc.write node_ctxt.store.lcc lcc in + let* () = Store.State.LCC.set node_ctxt.store (lcc.commitment, lcc.level) in Metrics.Info.set_lcc_level_local lcc.level ; if lcc.level > lcc_l1.level then ( Reference.set node_ctxt.lcc lcc ; @@ -595,9 +556,9 @@ let set_lcc node_ctxt lcc = let last_seen_lcc {store; genesis_info; _} = let open Lwt_result_syntax in - let+ lcc = Store.Lcc.read store.lcc in + let+ lcc = Store.State.LCC.get store in match lcc with - | Some lcc -> lcc + | Some (commitment, level) -> {commitment; level} | None -> {commitment = genesis_info.commitment_hash; level = genesis_info.level} @@ -606,13 +567,11 @@ let register_published_commitment node_ctxt commitment ~first_published_at_level let open Lwt_result_syntax in let commitment_hash = Commitment.hash commitment in let* prev_publication = - Store.Commitments_published_at_level.mem - 
node_ctxt.store.commitments_published_at_level - commitment_hash + Store.Commitments_published_at_levels.get node_ctxt.store commitment_hash in let published_at_level = if published_by_us then Some level else None in let* () = - if (not prev_publication) || published_by_us then + if Option.is_none prev_publication || published_by_us then set_commitment_published_at_level node_ctxt commitment_hash @@ -622,7 +581,9 @@ let register_published_commitment node_ctxt commitment ~first_published_at_level if published_by_us then Metrics.Info.set_lpc_level_local level else Metrics.Info.set_lpc_level_l1 level ; when_ published_by_us @@ fun () -> - let* () = Store.Lpc.write node_ctxt.store.lpc commitment in + let* () = + Store.State.LPC.set node_ctxt.store (commitment_hash, commitment.inbox_level) + in let update_lpc_ref = match Reference.get node_ctxt.lpc with | None -> true @@ -631,10 +592,7 @@ let register_published_commitment node_ctxt commitment ~first_published_at_level if update_lpc_ref then Reference.set node_ctxt.lpc (Some commitment) ; return_unit -let find_inbox {store; _} inbox_hash = - let open Lwt_result_syntax in - let+ inbox = Store.Inboxes.read store.inboxes inbox_hash in - Option.map fst inbox +let find_inbox {store; _} inbox_hash = Store.Inboxes.find store inbox_hash let get_inbox node_ctxt inbox_hash = let open Lwt_result_syntax in @@ -647,18 +605,10 @@ let get_inbox node_ctxt inbox_hash = inbox_hash | Some i -> return i -let save_inbox {store; _} inbox = - let open Lwt_result_syntax in - let hash = Octez_smart_rollup.Inbox.hash inbox in - let+ () = Store.Inboxes.append store.inboxes ~key:hash ~value:inbox in - hash +let save_inbox {store; _} inbox = Store.Inboxes.store store inbox -let find_inbox_by_block_hash ({store; _} as node_ctxt) block_hash = - let open Lwt_result_syntax in - let* header = Store.L2_blocks.header store.l2_blocks block_hash in - match header with - | None -> return_none - | Some {inbox_hash; _} -> find_inbox node_ctxt inbox_hash +let find_inbox_by_block_hash {store; _} block_hash = + Store.Inboxes.find_by_block_hash store block_hash let inbox_of_head node_ctxt Layer1.{hash = block_hash; level = block_level} = let open Lwt_result_syntax in @@ -691,55 +641,37 @@ let get_inbox_by_block_hash node_ctxt hash = let* level = level_of_hash node_ctxt hash in inbox_of_head node_ctxt {hash; level} -let unsafe_find_stored_messages node_ctxt = - Store.Messages.read node_ctxt.store.messages +let find_messages {store; _} payload_hash = + let open Lwt_result_syntax in + let+ res = Store.Messages.find store payload_hash in + Option.map snd res -let unsafe_get_stored_messages node_ctxt messages_hash = +let get_messages node_ctxt messages_hash = let open Lwt_result_syntax in - let* res = unsafe_find_stored_messages node_ctxt messages_hash in + let* res = find_messages node_ctxt messages_hash in match res with | None -> failwith "Could not retrieve messages with payloads merkelized hash %a" Merkelized_payload_hashes_hash.pp messages_hash - | Some (messages, _pred) -> return messages + | Some messages -> return messages -let get_num_messages {store; _} hash = +let get_num_messages node_ctxt hash = let open Lwt_result_syntax in - let* msg = Store.Messages.read store.messages hash in - match msg with - | None -> - failwith - "Could not retrieve number of messages for inbox witness %a" - Merkelized_payload_hashes_hash.pp - hash - | Some (messages, _pred_hash) -> return (List.length messages) + let+ messages = get_messages node_ctxt hash in + List.length messages -let save_messages 
{store; _} key ~predecessor messages = - Store.Messages.append - store.messages - ~key - ~header:predecessor - ~value:(messages :> string list) +let save_messages {store; _} key ~level messages = + Store.Messages.store store ~level key (messages :> string list) let set_outbox_message_executed {store; _} ~outbox_level ~index = - Store.Outbox_messages.set_outbox_message_executed - store.outbox_messages - ~outbox_level - ~index - -let register_new_outbox_messages {store; _} ~outbox_level ~indexes = - Store.Outbox_messages.register_new_outbox_messages - store.outbox_messages - ~outbox_level - ~indexes - -let register_missing_outbox_messages {store; _} ~outbox_level ~indexes = - Store.Outbox_messages.register_missing_outbox_messages - store.outbox_messages - ~outbox_level - ~indexes + Store.Outbox_messages.set_outbox_message_executed store ~outbox_level ~index + +let register_outbox_messages {store; _} ~outbox_level ~indexes = + let open Lwt_result_syntax in + let*? indexes = Bitset.from_list indexes in + Store.Outbox_messages.register_outbox_messages store ~outbox_level ~indexes let get_executable_pending_outbox_messages {store; lcc; current_protocol; _} = let max_level = (Reference.get lcc).level in @@ -751,7 +683,7 @@ let get_executable_pending_outbox_messages {store; lcc; current_protocol; _} = (constants.max_number_of_stored_cemented_commitments * constants.commitment_period_in_blocks)) in - Store.Outbox_messages.pending store.outbox_messages ~min_level ~max_level + Store.Outbox_messages.pending store ~min_level ~max_level let get_unexecutable_pending_outbox_messages ({store; lcc; _} as node_ctxt) = let open Lwt_result_syntax in @@ -762,35 +694,33 @@ let get_unexecutable_pending_outbox_messages ({store; lcc; _} as node_ctxt) = | Some h -> Ok h.header.level in let min_level = Int32.succ (Reference.get lcc).level in - Store.Outbox_messages.pending store.outbox_messages ~min_level ~max_level + Store.Outbox_messages.pending store ~min_level ~max_level let get_full_l2_block ?get_outbox_messages node_ctxt block_hash = let open Lwt_result_syntax in - let* block = get_l2_block node_ctxt block_hash in - let* inbox = get_inbox node_ctxt block.header.inbox_hash - and* messages = - unsafe_get_stored_messages node_ctxt block.header.inbox_witness - and* commitment = - Option.map_es (get_commitment node_ctxt) block.header.commitment_hash - and* outbox = - match get_outbox_messages with - | None -> return_none - | Some get_outbox_messages -> ( - let* ctxt = checkout_context node_ctxt block_hash in - let*! pvm_state = Context.PVMState.find ctxt in - match pvm_state with + let* block = Store.L2_blocks.find_full node_ctxt.store block_hash in + match block with + | None -> + failwith "Could not retrieve L2 block for %a" Block_hash.pp block_hash + | Some block -> + let* outbox = + match get_outbox_messages with | None -> return_none - | Some pvm_state -> - let*! outbox = - get_outbox_messages - node_ctxt - pvm_state - ~outbox_level:block.header.level - in - return_some outbox) - in - return - {block with content = {Sc_rollup_block.inbox; messages; commitment; outbox}} + | Some get_outbox_messages -> ( + let* ctxt = checkout_context node_ctxt block_hash in + let*! pvm_state = Context.PVMState.find ctxt in + match pvm_state with + | None -> return_none + | Some pvm_state -> + let*! 
outbox = + get_outbox_messages + node_ctxt + pvm_state + ~outbox_level:block.header.level + in + return_some outbox) + in + return Sc_rollup_block.{block with content = {block.content with outbox}} type proto_info = { proto_level : int; @@ -800,34 +730,19 @@ type proto_info = { let protocol_of_level_with_store (store : _ Store.t) level = let open Lwt_result_syntax in - let* protocols = Store.Protocols.read store.protocols in - let*? protocols = - match protocols with - | None | Some [] -> - error_with "Cannot infer protocol for level %ld: no protocol info" level - | Some protos -> Ok protos - in - let rec find = function - | [] -> - error_with "Cannot infer protocol for level %ld: no information" level - | {Store.Protocols.level = p_level; proto_level; protocol} :: protos -> ( - (* Latest protocols appear first in the list *) - match p_level with - | First_known l when level >= l -> - Ok {protocol; proto_level; first_level_of_protocol = false} - | Activation_level l when level > l -> - (* The block at the activation level is of the previous protocol, so - we are in the protocol that was activated at [l] only when the - level we query is after [l]. *) - Ok - { - protocol; - proto_level; - first_level_of_protocol = level = Int32.succ l; - } - | _ -> (find [@tailcall]) protos) - in - Lwt.return (find protocols) + let* p = Store.Protocols.proto_of_level store level in + match p with + | None -> + failwith + "Cannot infer protocol for level %ld. This block is likely before the \ + origination of the rollup. Make sure that the L1 octez node is \ + properfly synchronized before starting the rollup node." + level + | Some {level = First_known _; proto_level; protocol} -> + return {protocol; proto_level; first_level_of_protocol = false} + | Some {level = Activation_level l; proto_level; protocol} -> + return + {protocol; proto_level; first_level_of_protocol = level = Int32.succ l} let protocol_of_level node_ctxt level = assert (level >= node_ctxt.genesis_info.level) ; @@ -835,39 +750,32 @@ let protocol_of_level node_ctxt level = let last_seen_protocol node_ctxt = let open Lwt_result_syntax in - let+ protocols = Store.Protocols.read node_ctxt.store.protocols in - match protocols with - | None | Some [] -> None - | Some (p :: _) -> Some p.protocol + let+ p = Store.Protocols.last node_ctxt.store in + match p with None -> None | Some p -> Some p.protocol let protocol_activation_level node_ctxt protocol_hash = let open Lwt_result_syntax in - let* protocols = Store.Protocols.read node_ctxt.store.protocols in - match - Option.bind - protocols - (List.find_map (function Store.Protocols.{protocol; level; _} -> - if Protocol_hash.(protocol_hash = protocol) then Some level else None)) - with + let* p = Store.Protocols.find node_ctxt.store protocol_hash in + match p with + | Some p -> return p.level | None -> failwith "Could not determine the activation level of a previously unseen \ protocol %a" Protocol_hash.pp protocol_hash - | Some l -> return l let save_protocol_info node_ctxt (block : Layer1.header) ~(predecessor : Layer1.header) = let open Lwt_result_syntax in - let* protocols = Store.Protocols.read node_ctxt.store.protocols in - match protocols with - | Some ({proto_level; _} :: _) + let* last_protocol = Store.Protocols.last node_ctxt.store in + match last_protocol with + | Some {proto_level; _} when proto_level = block.header.proto_level && block.header.proto_level = predecessor.header.proto_level -> (* Nominal case, no protocol change. Nothing to do. 
*) return_unit - | None | Some [] -> + | None -> (* No protocols information saved in the rollup node yet, initialize with information by looking at the current head and its predecessor. We need to figure out if a protocol upgrade happened in one of these two blocks. @@ -917,10 +825,8 @@ let save_protocol_info node_ctxt (block : Layer1.header) in [proto_info; pred_proto_info] in - Store.Protocols.write node_ctxt.store.protocols protocols - | Some - ({proto_level = last_proto_level; _} :: previous_protocols as protocols) - -> + List.iter_es (Store.Protocols.store node_ctxt.store) protocols + | Some _ -> (* block.header.proto_level <> last_proto_level or head is a migration block, i.e. there is a protocol change w.r.t. last registered one. *) let is_head_migration_block = @@ -941,31 +847,7 @@ let save_protocol_info node_ctxt (block : Layer1.header) Store.Protocols. {level; proto_level = block.header.proto_level; protocol} in - let protocols = - if block.header.proto_level > last_proto_level then - (* Protocol upgrade, add new item to protocol list *) - proto_info :: protocols - else if block.header.proto_level < last_proto_level then ( - (* Reorganization in which a protocol migration block was - backtracked. *) - match previous_protocols with - | [] -> - (* No info further back, store what we know. *) - [proto_info] - | previous_proto :: _ -> - (* make sure that we are in the case where we backtracked the - migration block. *) - assert ( - Protocol_hash.(proto_info.protocol = previous_proto.protocol)) ; - (* Remove last stored protocol *) - previous_protocols) - else - (* block.header.proto_level = last_proto_level && is_migration_block *) - (* Reorganization where we are doing a different protocol - upgrade. Replace last stored protocol. *) - proto_info :: previous_protocols - in - Store.Protocols.write node_ctxt.store.protocols protocols + Store.Protocols.store node_ctxt.store proto_info let get_slot_header {store; _} ~published_in_block_hash slot_index = Error.trace_lwt_result_with @@ -973,54 +855,62 @@ let get_slot_header {store; _} ~published_in_block_hash slot_index = slot_index Block_hash.pp published_in_block_hash - @@ Store.Dal_slots_headers.get - store.irmin_store - ~primary_key:published_in_block_hash - ~secondary_key:slot_index + @@ + let open Lwt_result_syntax in + let* sh = + Store.Dal_slots_headers.find_slot_header + store + published_in_block_hash + ~slot_index + in + match sh with + | None -> + failwith + "No slot header stored for %a" + Block_hash.pp + published_in_block_hash + | Some sh -> return sh let get_all_slot_headers {store; _} ~published_in_block_hash = - Store.Dal_slots_headers.list_values - store.irmin_store - ~primary_key:published_in_block_hash + Store.Dal_slots_headers.list_slot_headers store published_in_block_hash let get_slot_indexes {store; _} ~published_in_block_hash = - Store.Dal_slots_headers.list_secondary_keys - store.irmin_store - ~primary_key:published_in_block_hash + Store.Dal_slots_headers.list_slot_indexes store published_in_block_hash let save_slot_header {store; _} ~published_in_block_hash (slot_header : Dal.Slot_header.t) = - Store.Dal_slots_headers.add - store.irmin_store - ~primary_key:published_in_block_hash - ~secondary_key:slot_header.id.index - slot_header + Store.Dal_slots_headers.store store published_in_block_hash slot_header let find_slot_status {store; _} ~confirmed_in_block_hash slot_index = - Store.Dal_slots_statuses.find - store.irmin_store - ~primary_key:confirmed_in_block_hash - ~secondary_key:slot_index + 
Store.Dal_slots_statuses.find_slot_status + store + confirmed_in_block_hash + ~slot_index let list_slots_statuses {store; _} ~confirmed_in_block_hash = - Store.Dal_slots_statuses.list_secondary_keys_with_values - store.irmin_store - ~primary_key:confirmed_in_block_hash + Store.Dal_slots_statuses.list_slot_statuses store confirmed_in_block_hash let save_slot_status {store; _} current_block_hash slot_index status = - Store.Dal_slots_statuses.add - store.irmin_store - ~primary_key:current_block_hash - ~secondary_key:slot_index - status - -let get_gc_info_aux node_ctxt step = - let store = + Store.Dal_slots_statuses.store store current_block_hash slot_index status + +type gc_level = {gc_triggered_at : int32; gc_target : int32} + +let get_gc_info_aux {store; _} step = + let open Lwt_result_syntax in + let* gc_triggered_at = + match step with + | `Started -> Store.State.Last_gc_triggered_at.get store + | `Successful -> Store.State.Last_successful_gc_triggered_at.get store + in + let* gc_target = match step with - | `Started -> node_ctxt.store.gc_levels - | `Successful -> node_ctxt.store.successful_gc_levels + | `Started -> Store.State.Last_gc_target.get store + | `Successful -> Store.State.Last_successful_gc_target.get store in - Store.Gc_levels.read store + match (gc_triggered_at, gc_target) with + | None, _ | _, None -> return_none + | Some gc_triggered_at, Some gc_target -> + return_some {gc_triggered_at; gc_target} let get_gc_info node_ctxt step = let open Lwt_result_syntax in @@ -1035,25 +925,23 @@ let get_gc_info node_ctxt step = let first_available_level node_ctxt = let open Lwt_result_syntax in - let+ gc_levels = get_gc_info node_ctxt `Started in - match gc_levels with - | Some {first_available_level; _} -> first_available_level - | None -> node_ctxt.genesis_info.level + let+ last_gc_target = Store.State.Last_gc_target.get node_ctxt.store in + Option.value last_gc_target ~default:node_ctxt.genesis_info.level let get_last_context_split_level node_ctxt = - Store.Last_context_split.read node_ctxt.store.last_context_split_level + Store.State.Last_context_split.get node_ctxt.store -let save_gc_info node_ctxt step ~at_level ~gc_level = +let save_gc_info {store; _} step ~at_level ~gc_level = let open Lwt_syntax in - let store = - match step with - | `Started -> node_ctxt.store.gc_levels - | `Successful -> node_ctxt.store.successful_gc_levels - in let* res = - Store.Gc_levels.write - store - {last_gc_level = at_level; first_available_level = gc_level} + let open Lwt_result_syntax in + match step with + | `Started -> + let* () = Store.State.Last_gc_target.set store gc_level in + Store.State.Last_gc_triggered_at.set store at_level + | `Successful -> + let* () = Store.State.Last_successful_gc_target.set store gc_level in + Store.State.Last_successful_gc_triggered_at.set store at_level in match res with | Error _ -> Event.gc_levels_storage_failure () @@ -1073,14 +961,11 @@ let splitting_period node_ctxt = Option.value node_ctxt.config.gc_parameters.context_splitting_period ~default let save_context_split_level node_ctxt level = - Store.Last_context_split.write node_ctxt.store.last_context_split_level level + Store.State.Last_context_split.set node_ctxt.store level let candidate_gc_level node_ctxt = let open Lwt_result_syntax in - let* history_mode = Store.History_mode.read node_ctxt.store.history_mode in - let history_mode = - Option.value history_mode ~default:Configuration.default_history_mode - in + let* history_mode = get_history_mode node_ctxt in match history_mode with | Archive -> (* Never 
call GC in archive mode *) @@ -1095,11 +980,11 @@ let gc ?(wait_finished = false) ?(force = false) node_ctxt ~(level : int32) = (* [gc_level] is the level corresponding to the hash on which GC will be called. *) let* gc_level = candidate_gc_level node_ctxt in - let* gc_info = get_gc_info node_ctxt `Successful in + let* last_gc_target = + Store.State.Last_successful_gc_target.get node_ctxt.store + in let last_gc_target = - match gc_info with - | Some {first_available_level; _} -> first_available_level - | None -> node_ctxt.genesis_info.level + Option.value last_gc_target ~default:node_ctxt.genesis_info.level in let frequency = match node_ctxt.config.gc_parameters.frequency_in_blocks with @@ -1110,17 +995,17 @@ let gc ?(wait_finished = false) ?(force = false) node_ctxt ~(level : int32) = | None -> return_unit | Some gc_level when (force || Int32.(sub gc_level last_gc_target >= frequency)) - && Context.is_gc_finished node_ctxt.context - && Store.is_gc_finished node_ctxt.store -> ( + && Context.is_gc_finished node_ctxt.context -> ( let* hash = hash_of_level node_ctxt gc_level in - let* header = Store.L2_blocks.header node_ctxt.store.l2_blocks hash in - match header with + let* context = Store.L2_blocks.find_context node_ctxt.store hash in + match context with | None -> failwith - "Could not retrieve L2 block header for %a" + "GC: Could not retrieve context for L2 block %a at %ld" Block_hash.pp hash - | Some {context; _} -> + gc_level + | Some context -> let start_timestamp = Time.System.now () in let* gc_lockfile = Lwt_lock_file.lock @@ -1135,13 +1020,13 @@ let gc ?(wait_finished = false) ?(force = false) node_ctxt ~(level : int32) = Metrics.GC.set_oldest_available_level gc_level) ; (* Start both node and context gc asynchronously *) let*! () = Context.gc node_ctxt.context context in - let* () = Store.gc node_ctxt.store ~level:gc_level in + let store_gc_promise = Store.gc node_ctxt.store ~level:gc_level in let gc_waiter () = let open Lwt_syntax in Lwt.finalize (fun () -> let* () = Context.wait_gc_completion node_ctxt.context - and* () = Store.wait_gc_completion node_ctxt.store in + and* (_ : unit tzresult) = store_gc_promise in Metrics.wrap (fun () -> let stop_timestamp = Time.System.now () in Metrics.GC.set_process_time @@ -1163,8 +1048,7 @@ let gc ?(wait_finished = false) ?(force = false) node_ctxt ~(level : int32) = let cancel_gc node_ctxt = let open Lwt_syntax in let canceled_context_gc = Context.cancel_gc node_ctxt.context in - let+ canceled_store_gc = Store.cancel_gc node_ctxt.store in - canceled_context_gc || canceled_store_gc + return canceled_context_gc let check_level_available node_ctxt accessed_level = let open Lwt_result_syntax in @@ -1191,8 +1075,8 @@ let wait_synchronized node_ctxt = (**/**) module Internal_for_tests = struct - let write_protocols_in_store (store : [> `Write] store) = - Store.Protocols.write store.Store.protocols + let write_protocols_in_store (store : [> `Write] store) protocols = + List.iter_es (Store.Protocols.store store) protocols let unsafe_get_store ctxt = ctxt.store end diff --git a/src/lib_smart_rollup_node/node_context.mli b/src/lib_smart_rollup_node/node_context.mli index a90a3f46c19d..db92360b1cae 100644 --- a/src/lib_smart_rollup_node/node_context.mli +++ b/src/lib_smart_rollup_node/node_context.mli @@ -27,7 +27,7 @@ (** This module describes the execution context of the node. 
*) -type lcc = Store.Lcc.lcc = {commitment : Commitment.Hash.t; level : int32} +type lcc = {commitment : Commitment.Hash.t; level : int32} type genesis_info = Metadata.genesis_info = { level : int32; @@ -39,18 +39,12 @@ type 'a store constraint 'a = [< `Read | `Write > `Read] (** Exposed functions to manipulate Node_context store outside of this module *) module Node_store : sig - (** [load mode ~index_buffer_size ~l2_blocks_cache_size directory] - loads a store form the data persisted [directory] as described in - {!Store_sigs.load} *) - val load : - 'a Store_sigs.mode -> - index_buffer_size:int -> - l2_blocks_cache_size:int -> - string -> - 'a store tzresult Lwt.t + (** [load mode ~data_dir] loads a store form the data persisted in [data_dir] + as described in. *) + val load : 'a Store_sigs.mode -> data_dir:string -> 'a store tzresult Lwt.t - (** [close_store store] closes the store *) - val close : 'a store -> unit tzresult Lwt.t + (** [close store] closes the store *) + val close : 'a store -> unit Lwt.t (** [check_and_set_history_mode store history_mode] checks the compatibility between given history mode and that of the store. @@ -363,14 +357,14 @@ val save_commitment : rw -> Commitment.t -> Commitment.Hash.t tzresult Lwt.t val commitment_published_at_level : _ t -> Commitment.Hash.t -> - Store.Commitments_published_at_level.element option tzresult Lwt.t + Store.Commitments_published_at_levels.publication_levels option tzresult Lwt.t (** [save_commitment_published_at_level t hash levels] saves the publication/inclusion information for a commitment with [hash]. *) val set_commitment_published_at_level : rw -> Commitment.Hash.t -> - Store.Commitments_published_at_level.element -> + Store.Commitments_published_at_levels.publication_levels -> unit tzresult Lwt.t type commitment_source = Anyone | Us @@ -430,25 +424,26 @@ val inbox_of_head : val get_inbox_by_block_hash : _ t -> Block_hash.t -> Octez_smart_rollup.Inbox.t tzresult Lwt.t -(** Returns messages as they are stored in the store, unsafe to use because all - messages may not be present. Use {!Messages.get} instead. *) -val unsafe_find_stored_messages : - _ t -> - Merkelized_payload_hashes_hash.t -> - (string list * Block_hash.t) option tzresult Lwt.t +(** Returns messages for a payload hash, including protocol messages. *) +val find_messages : + _ t -> Merkelized_payload_hashes_hash.t -> string list option tzresult Lwt.t + +(** Same as {!find_messages} but fails if not messages are stored. *) +val get_messages : + _ t -> Merkelized_payload_hashes_hash.t -> string list tzresult Lwt.t (** [get_num_messages t witness_hash] retrieves the number of messages for the inbox witness [witness_hash] stored by the rollup node. *) val get_num_messages : _ t -> Merkelized_payload_hashes_hash.t -> int tzresult Lwt.t -(** [save_messages t payloads_hash ~predecessor messages] associates the list of +(** [save_messages t payloads_hash ~level messages] associates the list of [messages] to the [payloads_hash]. The payload hash must be computed by calling, e.g. {!Sc_rollup.Inbox.add_all_messages}. *) val save_messages : rw -> Merkelized_payload_hashes_hash.t -> - predecessor:Block_hash.t -> + level:int32 -> string list -> unit tzresult Lwt.t @@ -470,18 +465,11 @@ type proto_info = { val set_outbox_message_executed : rw -> outbox_level:int32 -> index:int -> unit tzresult Lwt.t -(** [register_new_outbox_messages node_ctxt ~outbox_level ~indexes] registers - the messages indexes for the [outbox_level]. 
If messages were already - registered for this level, they are overwritten. Messages added are first - marked unexecuted. *) -val register_new_outbox_messages : - rw -> outbox_level:int32 -> indexes:int list -> unit tzresult Lwt.t - -(** [register_missing_outbox_messages node_ctxt ~outbox_level ~indexes] - registers the messages indexes for the [outbox_level]. If messages were - already registered for this level, they are not overwritten. This function - is meant to be used to recompute missing outbox messages information. *) -val register_missing_outbox_messages : +(** [register_outbox_messages node_ctxt ~outbox_level ~indexes] registers the + messages indexes for the [outbox_level]. If messages were already registered + for this level, they are overwritten. Messages marked as executed are + preserved. *) +val register_outbox_messages : rw -> outbox_level:int32 -> indexes:int list -> unit tzresult Lwt.t (** Returns the pending messages (i.e. unexecuted) that can now be executed. @@ -602,14 +590,14 @@ val gc : canceled. *) val cancel_gc : rw -> bool Lwt.t +type gc_level = {gc_triggered_at : int32; gc_target : int32} + (** [get_gc_info node_ctxt step] returns information about the garbage collected levels. If [step] is [`Started], it returns information for the last started GC and if it's [`Successful], it returns information for the last successful GC. *) val get_gc_info : - _ t -> - [`Started | `Successful] -> - Store.Gc_levels.levels option tzresult Lwt.t + _ t -> [`Started | `Successful] -> gc_level option tzresult Lwt.t (** The first non garbage collected level available in the node. *) val first_available_level : _ t -> int32 tzresult Lwt.t @@ -651,7 +639,7 @@ val wait_synchronized : _ t -> unit Lwt.t module Internal_for_tests : sig val write_protocols_in_store : - [> `Write] store -> Store.Protocols.value -> unit tzresult Lwt.t + [> `Write] store -> Store.Protocols.proto_info list -> unit tzresult Lwt.t (** Extract the underlying store from the node context. This function is unsafe to use outside of tests as it breaks the abstraction barrier diff --git a/src/lib_smart_rollup_node/node_context_loader.ml b/src/lib_smart_rollup_node/node_context_loader.ml index d1027701c889..5055b9b2f8ad 100644 --- a/src/lib_smart_rollup_node/node_context_loader.ml +++ b/src/lib_smart_rollup_node/node_context_loader.ml @@ -64,15 +64,11 @@ let create_sync_info () = } let init (cctxt : #Client_context.full) ~data_dir ~irmin_cache_size - ~index_buffer_size ?log_kernel_debug_file ?last_whitelist_update mode - l1_ctxt genesis_info ~(lcc : lcc) ~lpc kind current_protocol + ?log_kernel_debug_file ?last_whitelist_update mode l1_ctxt genesis_info + ~(lcc : lcc) ~lpc kind current_protocol Configuration.( - { - sc_rollup_address = rollup_address; - l2_blocks_cache_size; - dal_node_endpoint; - _; - } as configuration) = + {sc_rollup_address = rollup_address; dal_node_endpoint; _} as + configuration) = let open Lwt_result_syntax in let* lockfile = lock ~data_dir in let metadata = @@ -93,13 +89,7 @@ let init (cctxt : #Client_context.full) ~data_dir ~irmin_cache_size let dal_cctxt = Option.map Dal_node_client.make_unix_cctxt dal_node_endpoint in - let* store = - Node_context.Node_store.load - mode - ~index_buffer_size - ~l2_blocks_cache_size - Configuration.(default_storage_dir data_dir) - in + let* store = Node_context.Node_store.load mode ~data_dir in let*? 
(module Plugin : Protocol_plugin_sig.S) = Protocol_plugins.proto_plugin_for_protocol current_protocol.hash in @@ -206,7 +196,7 @@ let close ({cctxt; store; context; l1_ctxt; finaliser; _} as node_ctxt) = let*! () = message "Closing context@." in let*! () = Context.close context in let*! () = message "Closing store@." in - let* () = Node_context.Node_store.close store in + let*! () = Node_context.Node_store.close store in let*! () = message "Releasing lock@." in let*! () = unlock node_ctxt in return_unit @@ -287,17 +277,17 @@ module For_snapshots = struct ~l1_blocks_cache_size cctxt in - let* lcc = Store.Lcc.read store.Store.lcc in + let* lcc = Store.State.LCC.get store in let lcc = match lcc with - | Some lcc -> lcc + | Some (commitment, level) -> {commitment; level} | None -> { commitment = metadata.genesis_info.commitment_hash; level = metadata.genesis_info.level; } in - let* lpc = Store.Lpc.read store.Store.lpc in + let* lpc = Store.Commitments.find_lpc store in let*! lockfile = Lwt_unix.openfile (Filename.temp_file "lock" "") [] 0o644 in @@ -395,13 +385,7 @@ module Internal_for_tests = struct } in let* lockfile = lock ~data_dir in - let* store = - Node_context.Node_store.load - Read_write - ~index_buffer_size - ~l2_blocks_cache_size - Configuration.(default_storage_dir data_dir) - in + let* store = Node_context.Node_store.load Read_write ~data_dir in let*? (module Plugin : Protocol_plugin_sig.S) = Protocol_plugins.proto_plugin_for_protocol current_protocol.hash in @@ -506,6 +490,6 @@ module Internal_for_tests = struct create_node_context cctxt current_protocol ~data_dir Wasm_2_0_0 in let*! () = Context.close node_ctxt.context in - let* () = Node_context.Node_store.close node_ctxt.store in + let*! () = Node_context.Node_store.close node_ctxt.store in return node_ctxt end diff --git a/src/lib_smart_rollup_node/node_context_loader.mli b/src/lib_smart_rollup_node/node_context_loader.mli index 31254ac28571..3bc2ac026fab 100644 --- a/src/lib_smart_rollup_node/node_context_loader.mli +++ b/src/lib_smart_rollup_node/node_context_loader.mli @@ -39,7 +39,6 @@ val init : #Client_context.full -> data_dir:string -> irmin_cache_size:int -> - index_buffer_size:int -> ?log_kernel_debug_file:string -> ?last_whitelist_update:Z.t * Int32.t -> 'a Store_sigs.mode -> diff --git a/src/lib_smart_rollup_node/repair.ml b/src/lib_smart_rollup_node/repair.ml index 3a6cb3d49466..49e65762294f 100644 --- a/src/lib_smart_rollup_node/repair.ml +++ b/src/lib_smart_rollup_node/repair.ml @@ -255,7 +255,7 @@ let recompute_outbox_messages node_ctxt first_level = level (List.length outbox_messages) ; let indexes = List.map fst outbox_messages in - Node_context.register_missing_outbox_messages + Node_context.register_outbox_messages node_ctxt ~outbox_level:level ~indexes) diff --git a/src/lib_smart_rollup_node/rollup_node_daemon.ml b/src/lib_smart_rollup_node/rollup_node_daemon.ml index 1777d09e5762..3ef920c4b1d9 100644 --- a/src/lib_smart_rollup_node/rollup_node_daemon.ml +++ b/src/lib_smart_rollup_node/rollup_node_daemon.ml @@ -124,7 +124,7 @@ let register_outbox_messages (module Plugin : Protocol_plugin_sig.S) node_ctxt | [] -> return_unit | _ -> let indexes = List.map fst outbox_messages in - Node_context.register_new_outbox_messages + Node_context.register_outbox_messages node_ctxt ~outbox_level:level ~indexes @@ -782,7 +782,7 @@ let plugin_of_first_block cctxt (block : Layer1.header) = let*? 
plugin = Protocol_plugins.proto_plugin_for_protocol current_protocol in return (current_protocol, plugin) -let run ~data_dir ~irmin_cache_size ~index_buffer_size ?log_kernel_debug_file +let run ~data_dir ~irmin_cache_size ?log_kernel_debug_file (configuration : Configuration.t) (cctxt : Client_context.full) = let open Lwt_result_syntax in let* () = @@ -869,7 +869,6 @@ let run ~data_dir ~irmin_cache_size ~index_buffer_size ?log_kernel_debug_file cctxt ~data_dir ~irmin_cache_size - ~index_buffer_size ?log_kernel_debug_file Read_write l1_ctxt diff --git a/src/lib_smart_rollup_node/rpc_directory.ml b/src/lib_smart_rollup_node/rpc_directory.ml index 376a75109152..47ec5032a853 100644 --- a/src/lib_smart_rollup_node/rpc_directory.ml +++ b/src/lib_smart_rollup_node/rpc_directory.ml @@ -430,19 +430,14 @@ let () = and* last_context_split_level = Node_context.get_last_context_split_level node_ctxt in - let first_available_level = + let first_available_level, last_gc_started_at = match started_gc_info with - | Some {first_available_level = l; _} -> l - | None -> node_ctxt.genesis_info.level - in - let last_gc_started_at = - match started_gc_info with - | Some {last_gc_level; _} -> Some last_gc_level - | None -> None + | Some gc -> (gc.gc_target, Some gc.gc_triggered_at) + | None -> (node_ctxt.genesis_info.level, None) in let last_successful_gc_target = match successful_gc_info with - | Some {first_available_level = l; _} -> Some l + | Some {gc_target = l; _} -> Some l | None -> None in return diff --git a/src/lib_smart_rollup_node/snapshots.ml b/src/lib_smart_rollup_node/snapshots.ml index 15b2ad122139..c7a430b47669 100644 --- a/src/lib_smart_rollup_node/snapshots.ml +++ b/src/lib_smart_rollup_node/snapshots.ml @@ -70,7 +70,7 @@ let check_store_version store_dir = let get_head (store : _ Store.t) = let open Lwt_result_syntax in - let* head = Store.L2_head.read store.l2_head in + let* head = Store.L2_blocks.find_head store in let*? head = match head with | None -> @@ -120,10 +120,7 @@ let pre_export_checks_and_get_snapshot_header cctxt ~no_checks ~data_dir = | Some m -> Ok m in let*? () = Context.Version.check metadata.context_version in - let* store = - Store.load Read_only ~index_buffer_size:0 ~l2_blocks_cache_size:1 store_dir - in - + let* store = Store.init Read_only ~data_dir in let* head = get_head store in let level = head.Sc_rollup_block.header.level in let* (module Plugin) = @@ -131,7 +128,7 @@ let pre_export_checks_and_get_snapshot_header cctxt ~no_checks ~data_dir = in let (module C) = Plugin.Pvm.context metadata.kind in let* context = Context.load (module C) ~cache_size:1 Read_only context_dir in - let* history_mode = Store.History_mode.read store.history_mode in + let* history_mode = Store.State.History_mode.get store in let*? history_mode = match history_mode with | None -> error_with "No history mode information in %S." data_dir @@ -143,10 +140,8 @@ let pre_export_checks_and_get_snapshot_header cctxt ~no_checks ~data_dir = in let* () = unless no_checks @@ fun () -> - let* last_commitment = - Store.Commitments.read store.commitments last_commitment_hash - in - let last_commitment, () = + let* last_commitment = Store.Commitments.find store last_commitment_hash in + let last_commitment = WithExceptions.Option.get ~loc:__LOC__ last_commitment in (* Check if predecessor commitment exist on chain as a safety measure, @@ -159,7 +154,7 @@ let pre_export_checks_and_get_snapshot_header cctxt ~no_checks ~data_dir = in (* Closing context and stores after checks *) let*! 
() = Context.close context in - let* () = Store.close store in + let*! () = Store.close store in return { Header.version = V0; @@ -171,9 +166,9 @@ let pre_export_checks_and_get_snapshot_header cctxt ~no_checks ~data_dir = let first_available_level ~data_dir store = let open Lwt_result_syntax in - let* gc_levels = Store.Gc_levels.read store.Store.gc_levels in - match gc_levels with - | Some {first_available_level; _} -> return first_available_level + let* first_available_level = Store.State.Last_gc_target.get store in + match first_available_level with + | Some first_available_level -> return first_available_level | None -> ( let* metadata = Metadata.read_metadata_file ~dir:data_dir in match metadata with @@ -187,20 +182,18 @@ let check_some hash what = function let check_block_data_and_get_content (store : _ Store.t) context hash = let open Lwt_result_syntax in - let* b = Store.L2_blocks.read store.l2_blocks hash in - let*? _b, header = check_some hash "L2 block" b in - let* messages = Store.Messages.read store.messages header.inbox_witness in - let*? _messages, _ = check_some hash "messages" messages in - let* inbox = Store.Inboxes.read store.inboxes header.inbox_hash in - let*? inbox, () = check_some hash "inbox" inbox in + let* b = Store.L2_blocks.find store hash in + let*? {header; _} = check_some hash "L2 block" b in + let* messages = Store.Messages.find store header.inbox_witness in + let*? _level, _messages = check_some hash "messages" messages in + let* inbox = Store.Inboxes.find store header.inbox_hash in + let*? inbox = check_some hash "inbox" inbox in let* commitment = match header.commitment_hash with | None -> return_none | Some commitment_hash -> - let* commitment = - Store.Commitments.read store.commitments commitment_hash - in - let*? commitment, () = check_some hash "commitment" commitment in + let* commitment = Store.Commitments.find store commitment_hash in + let*? commitment = check_some hash "commitment" commitment in return_some commitment in (* Ensure head context is available. *) @@ -444,31 +437,27 @@ let check_lcc metadata cctxt (store : _ Store.t) (head : Sc_rollup_block.t) (* The snapshot is older than the current LCC *) return_unit else - let* lcc_block_hash = - Store.Levels_to_hashes.find store.levels_to_hashes lcc.level - in + let* lcc_block_hash = Store.L2_levels.find store lcc.level in let*? 
lcc_block_hash = match lcc_block_hash with | None -> error_with "No block for LCC level %ld" lcc.level | Some h -> Ok h in - let* lcc_block_header = - Store.L2_blocks.header store.l2_blocks lcc_block_hash - in - match lcc_block_header with + let* lcc_block = Store.L2_blocks.find store lcc_block_hash in + match lcc_block with | None -> failwith "Unknown block %a for LCC level %ld" Block_hash.pp lcc_block_hash lcc.level - | Some {commitment_hash = None; _} -> + | Some {header = {commitment_hash = None; _}; _} -> failwith "No commitment for block %a for LCC level %ld" Block_hash.pp lcc_block_hash lcc.level - | Some {commitment_hash = Some commitment_hash; _} -> + | Some {header = {commitment_hash = Some commitment_hash; _}; _} -> fail_unless Commitment.Hash.(lcc.commitment = commitment_hash) @@ error_of_fmt "Snapshot contains %a for LCC at level %ld but was expected to be \ @@ -487,7 +476,9 @@ let reconstruct_level_context rollup_ctxt ~predecessor let open Lwt_result_syntax in let* block = Node_context.get_l2_block_by_level node_ctxt level in let* inbox = Node_context.get_inbox node_ctxt block.header.inbox_hash - and* messages = Messages.get node_ctxt block.header.inbox_witness in + and* messages = + Node_context.get_messages node_ctxt block.header.inbox_witness + in let* (module Plugin) = Protocol_plugins.proto_plugin_for_level node_ctxt level in @@ -510,13 +501,7 @@ let with_modify_data_dir cctxt ~data_dir ~apply_unsafe_patches let store_dir = Configuration.default_storage_dir data_dir in let context_dir = Configuration.default_context_dir data_dir in let* () = check_store_version store_dir in - let* store = - Store.load - Read_write - ~index_buffer_size:1000 - ~l2_blocks_cache_size:100 - store_dir - in + let* store = Store.init Read_write ~data_dir in let* head = get_head store in let* (module Plugin) = Protocol_plugins.proto_plugin_for_level_with_store store head.header.level @@ -562,7 +547,7 @@ let with_modify_data_dir cctxt ~data_dir ~apply_unsafe_patches in let* () = f node_ctxt ~head in let*! () = Context.close context in - let* () = Store.close store in + let*! () = Store.close store in return_unit let reconstruct_context_from_first_available_level @@ -612,13 +597,7 @@ let post_checks ?(apply_unsafe_patches = false) ~action ~message snapshot_header let context_dir = Configuration.default_context_dir dest in (* Load context and stores in read-only to run checks. *) let* () = check_store_version store_dir in - let* store = - Store.load - Read_only - ~index_buffer_size:1000 - ~l2_blocks_cache_size:100 - store_dir - in + let* store = Store.init Read_only ~data_dir:dest in let* head = get_head store in let* (module Plugin) = Protocol_plugins.proto_plugin_for_level_with_store store head.header.level @@ -658,7 +637,7 @@ let post_checks ?(apply_unsafe_patches = false) ~action ~message snapshot_header check_l2_chain ~message ~data_dir:dest store context head check_block_data in let*! () = Context.close context in - let* () = Store.close store in + let*! () = Store.close store in return_unit let post_export_checks ~snapshot_file = @@ -833,9 +812,7 @@ let export_compact cctxt ~no_checks ~compression ~data_dir ~dest ~filename = let*! 
() = Lwt_utils_unix.create_dir tmp_store_dir in let store_dir = Configuration.default_storage_dir data_dir in let context_dir = Configuration.default_context_dir data_dir in - let* store = - Store.load Read_only ~index_buffer_size:0 ~l2_blocks_cache_size:1 store_dir - in + let* store = Store.init Read_only ~data_dir in let* metadata = Metadata.read_metadata_file ~dir:data_dir in let*? metadata = match metadata with @@ -850,21 +827,18 @@ let export_compact cctxt ~no_checks ~compression ~data_dir ~dest ~filename = let (module C) = Plugin.Pvm.context metadata.kind in let* context = Context.load (module C) ~cache_size:1 Read_only context_dir in let* first_level = first_available_level ~data_dir store in - let* first_block_hash = - Store.Levels_to_hashes.find store.levels_to_hashes first_level - in - let first_block_hash = - WithExceptions.Option.get first_block_hash ~loc:__LOC__ - in - let* first_block = Store.L2_blocks.read store.l2_blocks first_block_hash in - let _, first_block = WithExceptions.Option.get first_block ~loc:__LOC__ in + let* first_block = Store.L2_blocks.find_by_level store first_level in + let first_block = WithExceptions.Option.get first_block ~loc:__LOC__ in let* () = Progress_bar.Lwt.with_background_spinner ~message: (Format.sprintf "Exporting context snapshot with first level %ld" first_level) - @@ Context.export_snapshot context first_block.context ~path:tmp_context_dir + @@ Context.export_snapshot + context + first_block.header.context + ~path:tmp_context_dir in let ( // ) = Filename.concat in (* TODO: https://gitlab.com/tezos/tezos/-/issues/6857 @@ -901,19 +875,12 @@ let export_compact cctxt ~no_checks ~compression ~data_dir ~dest ~filename = let pre_import_checks cctxt ~no_checks ~data_dir (snapshot_header : Header.t) = let open Lwt_result_syntax in - let store_dir = Configuration.default_storage_dir data_dir in (* Load stores in read-only to make simple checks. *) - let* store = - Store.load - Read_write - ~index_buffer_size:1000 - ~l2_blocks_cache_size:100 - store_dir - in - let* metadata = Metadata.read_metadata_file ~dir:data_dir - and* history_mode = Store.History_mode.read store.history_mode - and* head = Store.L2_head.read store.l2_head in - let* () = Store.close store in + let* store = Store.init Read_write ~data_dir in + let* metadata = Metadata.read_metadata_file ~dir:data_dir in + let* history_mode = Store.State.History_mode.get store in + let* head = Store.L2_blocks.find_head store in + let*! () = Store.close store in let*? 
() = let open Result_syntax in match (metadata, history_mode) with @@ -991,16 +958,8 @@ let correct_history_mode ~data_dir (snapshot_header : Header.t) assert false | Some Archive, Archive | Some Full, Full -> return_unit | Some Full, Archive -> - let* hist_store = - Store.History_mode.load - ~path: - (Filename.concat - (Configuration.default_storage_dir data_dir) - "history_mode") - Read_write - in - let* () = Store.History_mode.write hist_store Full in - return_unit + let* store = Store.init Read_write ~data_dir in + Store.State.History_mode.set store Full let import ~apply_unsafe_patches ~no_checks ~force cctxt ~data_dir ~snapshot_file = diff --git a/src/lib_smart_rollup_node/store.ml b/src/lib_smart_rollup_node/store.ml index 9ae6924d14d4..14d957b86e5a 100644 --- a/src/lib_smart_rollup_node/store.ml +++ b/src/lib_smart_rollup_node/store.ml @@ -24,4 +24,4 @@ (* *) (*****************************************************************************) -include Store_v4 +include Store_v5 diff --git a/src/lib_smart_rollup_node/store_migration.ml b/src/lib_smart_rollup_node/store_migration.ml index 4b6f3b0b9a33..fd4fce37def6 100644 --- a/src/lib_smart_rollup_node/store_migration.ml +++ b/src/lib_smart_rollup_node/store_migration.ml @@ -455,7 +455,7 @@ module V3_migrations = struct storage_dir // "levels_to_hashes" let recompute_level (v3_store : _ Store_v3.t) (l2_block : Sc_rollup_block.t) = - Store.Levels_to_hashes.add + Store_v3.Levels_to_hashes.add ~flush:true v3_store.levels_to_hashes l2_block.header.level diff --git a/src/lib_smart_rollup_node/store_v5.ml b/src/lib_smart_rollup_node/store_v5.ml new file mode 100644 index 000000000000..89fc0caf1c2e --- /dev/null +++ b/src/lib_smart_rollup_node/store_v5.ml @@ -0,0 +1,10 @@ +(*****************************************************************************) +(* *) +(* SPDX-License-Identifier: MIT *) +(* Copyright (c) 2024 Functori *) +(* *) +(*****************************************************************************) + +let version = Store_version.V5_sqlite + +include Sql_store diff --git a/src/lib_smart_rollup_node/store_version.ml b/src/lib_smart_rollup_node/store_version.ml index 357eef511d57..03e40acfa2c9 100644 --- a/src/lib_smart_rollup_node/store_version.ml +++ b/src/lib_smart_rollup_node/store_version.ml @@ -23,23 +23,31 @@ (* *) (*****************************************************************************) -type t = V0 | V1 | V2 | V3 | V4 +type t = V0 | V1 | V2 | V3 | V4 | V5_sqlite let pp ppf v = Format.pp_print_string ppf @@ - match v with V0 -> "v0" | V1 -> "v1" | V2 -> "v2" | V3 -> "v3" | V4 -> "v4" + match v with + | V0 -> "v0" + | V1 -> "v1" + | V2 -> "v2" + | V3 -> "v3" + | V4 -> "v4" + | V5_sqlite -> "v5_sqlite" let encoding = let open Data_encoding in conv - (function V0 -> 0 | V1 -> 1 | V2 -> 2 | V3 -> 3 | V4 -> 4) + (function + | V0 -> 0 | V1 -> 1 | V2 -> 2 | V3 -> 3 | V4 -> 4 | V5_sqlite -> 5) (function | 0 -> V0 | 1 -> V1 | 2 -> V2 | 3 -> V3 | 4 -> V4 + | 5 -> V5_sqlite | v -> Format.ksprintf Stdlib.failwith "Unsupported store version %d" v) (obj1 (req "store_version" int31)) diff --git a/src/lib_smart_rollup_node/store_version.mli b/src/lib_smart_rollup_node/store_version.mli index d4d78d8deee0..2a8b01ad1ed3 100644 --- a/src/lib_smart_rollup_node/store_version.mli +++ b/src/lib_smart_rollup_node/store_version.mli @@ -23,7 +23,7 @@ (* *) (*****************************************************************************) -type t = V0 | V1 | V2 | V3 | V4 +type t = V0 | V1 | V2 | V3 | V4 | V5_sqlite (** Pretty-printer for store 
versions *) val pp : Format.formatter -> t -> unit diff --git a/src/lib_smart_rollup_node/test/test_store_gc.ml b/src/lib_smart_rollup_node/test/test_store_gc.ml index e483ec4c6e55..73b969104fc8 100644 --- a/src/lib_smart_rollup_node/test/test_store_gc.ml +++ b/src/lib_smart_rollup_node/test/test_store_gc.ml @@ -19,9 +19,9 @@ let build_chain node_ctxt ~genesis ~length = return (genesis :: blocks) let check_raw_read name store ~gc_level (block : Sc_rollup_block.t) - ?(should_exist = block.header.level >= gc_level) proj read key = + ?(should_exist = block.header.level >= gc_level) read key = let open Lwt_result_syntax in - let+ res = read (proj store) key in + let+ res = read store key in match res with | None when should_exist -> Assert.fail_msg @@ -49,8 +49,7 @@ let check_chain_ok ~gc_level node_ctxt store chain = store ~gc_level block - (fun s -> s.Store.l2_blocks) - Store.L2_blocks.read + Store.L2_blocks.find block.header.block_hash in let* () = @@ -59,8 +58,7 @@ let check_chain_ok ~gc_level node_ctxt store chain = store ~gc_level block - (fun s -> s.Store.messages) - Store.Messages.read + Store.Messages.find block.header.inbox_witness in let* () = @@ -69,8 +67,7 @@ let check_chain_ok ~gc_level node_ctxt store chain = store ~gc_level block - (fun s -> s.Store.inboxes) - Store.Inboxes.read + Store.Inboxes.find block.header.inbox_hash in let* () = @@ -82,8 +79,7 @@ let check_chain_ok ~gc_level node_ctxt store chain = store ~gc_level block - (fun s -> s.Store.commitments) - Store.Commitments.read + Store.Commitments.find commitment_hash in let* () = @@ -92,8 +88,7 @@ let check_chain_ok ~gc_level node_ctxt store chain = store ~gc_level block - (fun s -> s.Store.levels_to_hashes) - Store.Levels_to_hashes.find + Store.L2_levels.find block.header.level in (* Checking access through Node_context *) @@ -132,9 +127,9 @@ let gc_test node_ctxt ~genesis = let* chain = build_chain node_ctxt ~genesis ~length in (* Garbage collecting everything below level 50 *) let store = Node_context.Internal_for_tests.unsafe_get_store node_ctxt in - let* () = Store.gc store ~level:gc_level in + let gc = Store.gc store ~level:gc_level in let* last_block = Helpers.append_l2_block node_ctxt ["\001I'm new"] in - let*! () = Store.wait_gc_completion store in + let* () = gc in (* Checking result of GC *) let* () = check_chain_ok ~gc_level node_ctxt store (chain @ [last_block]) in return_unit @@ -169,13 +164,13 @@ let gc_test_reorg node_ctxt ~genesis = in (* Garbage collecting everything below level 50 *) let store = Node_context.Internal_for_tests.unsafe_get_store node_ctxt in - let* () = Store.gc store ~level:gc_level in + let gc = Store.gc store ~level:gc_level in (* Trigger a reorganization by adding a new block on top of the alternative head. *) let* last_block = Helpers.add_l2_block node_ctxt ["\001Reorged"] ~predecessor_l2_block:head in - let*! 
() = Store.wait_gc_completion store in + let* () = gc in (* Ensure both forked blocks are available *) let* reorged_block_by_hash = Node_context.find_l2_block node_ctxt head.header.block_hash diff --git a/src/lib_smart_rollup_node/wasm_2_0_0_utilities.ml b/src/lib_smart_rollup_node/wasm_2_0_0_utilities.ml index 333805218552..aeaf8fff7134 100644 --- a/src/lib_smart_rollup_node/wasm_2_0_0_utilities.ml +++ b/src/lib_smart_rollup_node/wasm_2_0_0_utilities.ml @@ -13,12 +13,10 @@ let load_context ~data_dir (module Plugin : Protocol_plugin_sig.S) mode = mode (Configuration.default_context_dir data_dir) -(** [get_wasm_pvm_state ~l2_header data_dir] reads the WASM PVM state in - [data_dir] for the given [l2_header].*) -let get_wasm_pvm_state ~(l2_header : Sc_rollup_block.header) context = +(** [get_wasm_pvm_state ctxt block_hash context_hash] reads the WASM PVM state + in [ctxt] for the given [context_hash].*) +let get_wasm_pvm_state context block_hash context_hash = let open Lwt_result_syntax in - let context_hash = l2_header.context in - let block_hash = l2_header.block_hash in (* Now, we can checkout the state of the rollup of the given block hash *) let*! ctxt = Context.checkout context context_hash in let* ctxt = @@ -95,13 +93,7 @@ let generate_durable_storage ~(plugin : (module Protocol_plugin_sig.S)) tree = let dump_durable_storage ~block ~data_dir ~file = let open Lwt_result_syntax in - let* store = - Store.load - Tezos_layer2_store.Store_sigs.Read_only - ~index_buffer_size:0 - ~l2_blocks_cache_size:5 - (Configuration.default_storage_dir data_dir) - in + let* store = Store.init Read_only ~data_dir in let get name load = let* value = load () in match value with @@ -110,33 +102,31 @@ let dump_durable_storage ~block ~data_dir ~file = in let hash_from_level l = get (Format.asprintf "Block hash for level %ld" l) (fun () -> - Store.Levels_to_hashes.find store.levels_to_hashes l) + Store.L2_levels.find store l) in let block_from_hash h = get (Format.asprintf "Block with hash %a" Block_hash.pp h) (fun () -> - Store.L2_blocks.read store.l2_blocks h) + Store.L2_blocks.find store h) in let get_l2_head () = - get "Processed L2 head" (fun () -> Store.L2_head.read store.l2_head) + get "Processed L2 head" (fun () -> Store.State.L2_head.get store) in let* block_hash, block_level = match block with | `Genesis -> failwith "Genesis not supported" - | `Head 0 -> - let* {header = {block_hash; level; _}; _} = get_l2_head () in - return (block_hash, level) + | `Head 0 -> get_l2_head () | `Head offset -> - let* {header = {level; _}; _} = get_l2_head () in + let* _, level = get_l2_head () in let l = Int32.(sub level (of_int offset)) in let* h = hash_from_level l in return (h, l) | `Alias (_, _) -> failwith "Alias not supported" | `Hash (h, 0) -> - let* _block, {block_hash; level; _} = block_from_hash h in + let* {header = {block_hash; level; _}; _} = block_from_hash h in return (block_hash, level) | `Hash (h, offset) -> - let* _block, block_header = block_from_hash h in - let l = Int32.(sub block_header.level (of_int offset)) in + let* block = block_from_hash h in + let l = Int32.(sub block.header.level (of_int offset)) in let* h = hash_from_level l in return (h, l) | `Level l -> @@ -146,14 +136,14 @@ let dump_durable_storage ~block ~data_dir ~file = let* (plugin : (module Protocol_plugin_sig.S)) = Protocol_plugins.proto_plugin_for_level_with_store store block_level in - let* l2_header = Store.L2_blocks.header store.l2_blocks block_hash in - let* l2_header = - match l2_header with + let* context_hash = 
Store.L2_blocks.find_context store block_hash in + let* context_hash = + match context_hash with | None -> tzfail Rollup_node_errors.Cannot_checkout_l2_header - | Some header -> return header + | Some c -> return c in let* context = load_context ~data_dir plugin Store_sigs.Read_only in - let* state = get_wasm_pvm_state ~l2_header context in + let* state = get_wasm_pvm_state context block_hash context_hash in let* instrs = generate_durable_storage ~plugin state in let* () = Installer_config.to_file instrs ~output:file in return_unit @@ -162,37 +152,24 @@ let patch_durable_storage ~data_dir ~key ~value = let open Lwt_result_syntax in (* Loads the state of the head. *) let* _lock = Node_context_loader.lock ~data_dir in - let* store = - Store.load - Tezos_layer2_store.Store_sigs.Read_write - ~index_buffer_size:0 - ~l2_blocks_cache_size:1 - (Configuration.default_storage_dir data_dir) - in + let* store = Store.init Read_write ~data_dir in let* ({header = {block_hash; level = block_level; _}; _} as l2_block) = - let* r = Store.L2_head.read store.l2_head in + let* r = Store.L2_blocks.find_head store in match r with | Some v -> return v | None -> failwith "Processed L2 head is not found in the rollup node storage" in - let* ((module Plugin) as plugin) = Protocol_plugins.proto_plugin_for_level_with_store store block_level in - let* l2_header = Store.L2_blocks.header store.l2_blocks block_hash in - let* l2_header = - match l2_header with - | None -> tzfail Rollup_node_errors.Cannot_checkout_l2_header - | Some header -> return header - in let* () = fail_when - (Option.is_some l2_header.commitment_hash) + (Option.is_some l2_block.header.commitment_hash) (Rollup_node_errors.Patch_durable_storage_on_commitment block_level) in let* context = load_context ~data_dir plugin Store_sigs.Read_write in - let* state = get_wasm_pvm_state ~l2_header context in + let* state = get_wasm_pvm_state context block_hash l2_block.header.context in (* Patches the state via an unsafe patch. *) let* patched_state = @@ -205,14 +182,7 @@ let patch_durable_storage ~data_dir ~key ~value = (* Replaces the PVM state. *) let*! context = Context.PVMState.set context patched_state in let*! 
new_commit = Context.commit context in - let new_l2_header = {l2_header with context = new_commit} in - let new_l2_block = {l2_block with header = (); content = ()} in - let* () = - Store.L2_blocks.append - store.l2_blocks - ~key:new_l2_header.block_hash - ~header:new_l2_header - ~value:new_l2_block + let new_l2_block = + {l2_block with header = {l2_block.header with context = new_commit}} in - let new_l2_block_with_header = {l2_block with header = new_l2_header} in - Store.L2_head.write store.l2_head new_l2_block_with_header + Store.L2_blocks.store store new_l2_block diff --git a/src/proto_017_PtNairob/lib_sc_rollup_node/inbox.ml b/src/proto_017_PtNairob/lib_sc_rollup_node/inbox.ml index 6f167257016b..03bc41d8f2ee 100644 --- a/src/proto_017_PtNairob/lib_sc_rollup_node/inbox.ml +++ b/src/proto_017_PtNairob/lib_sc_rollup_node/inbox.ml @@ -173,7 +173,7 @@ let process_messages (node_ctxt : _ Node_context.t) ~is_first_block Node_context.save_messages node_ctxt witness_hash - ~predecessor:predecessor.hash + ~level:(Int32.succ predecessor.level) messages_with_protocol_internal_messages in return @@ -207,11 +207,7 @@ let process_head (node_ctxt : _ Node_context.t) ~(predecessor : Layer1.header) Octez_smart_rollup.Inbox.Skip_list.content inbox.old_levels_messages in let* () = - Node_context.save_messages - node_ctxt - witness - ~predecessor:predecessor.hash - [] + Node_context.save_messages node_ctxt witness ~level:head.level [] in let* inbox_hash = Node_context.save_inbox node_ctxt inbox in return (inbox_hash, inbox, witness, []) diff --git a/src/proto_017_PtNairob/lib_sc_rollup_node/refutation_game_helpers.ml b/src/proto_017_PtNairob/lib_sc_rollup_node/refutation_game_helpers.ml index cec4af22fa66..22c753f8c834 100644 --- a/src/proto_017_PtNairob/lib_sc_rollup_node/refutation_game_helpers.ml +++ b/src/proto_017_PtNairob/lib_sc_rollup_node/refutation_game_helpers.ml @@ -202,7 +202,7 @@ let generate_proof (node_ctxt : _ Node_context.t) @@ let open Lwt_result_syntax in let* messages = - Messages.get + Node_context.get_messages node_ctxt (Sc_rollup_proto_types.Merkelized_payload_hashes_hash.to_octez witness) diff --git a/src/proto_018_Proxford/lib_sc_rollup_node/inbox.ml b/src/proto_018_Proxford/lib_sc_rollup_node/inbox.ml index 84f92273d386..f2df23cdf653 100644 --- a/src/proto_018_Proxford/lib_sc_rollup_node/inbox.ml +++ b/src/proto_018_Proxford/lib_sc_rollup_node/inbox.ml @@ -168,7 +168,7 @@ let process_messages (node_ctxt : _ Node_context.t) ~is_first_block Node_context.save_messages node_ctxt witness_hash - ~predecessor:predecessor.hash + ~level:(Int32.succ predecessor.level) messages_with_protocol_internal_messages in let inbox = Sc_rollup_proto_types.Inbox.to_octez inbox in @@ -207,11 +207,7 @@ let process_head (node_ctxt : _ Node_context.t) ~(predecessor : Layer1.header) Octez_smart_rollup.Inbox.Skip_list.content inbox.old_levels_messages in let* () = - Node_context.save_messages - node_ctxt - witness - ~predecessor:predecessor.hash - [] + Node_context.save_messages node_ctxt witness ~level:head.level [] in let* inbox_hash = Node_context.save_inbox node_ctxt inbox in return (inbox_hash, inbox, witness, []) diff --git a/src/proto_018_Proxford/lib_sc_rollup_node/refutation_game_helpers.ml b/src/proto_018_Proxford/lib_sc_rollup_node/refutation_game_helpers.ml index 8ef96817d3fb..34656f4ccd1a 100644 --- a/src/proto_018_Proxford/lib_sc_rollup_node/refutation_game_helpers.ml +++ b/src/proto_018_Proxford/lib_sc_rollup_node/refutation_game_helpers.ml @@ -214,7 +214,7 @@ let generate_proof (node_ctxt 
: _ Node_context.t) ~error:(Format.kasprintf Stdlib.failwith "%a" pp_print_trace)) @@ let open Lwt_result_syntax in - let* messages = Messages.get node_ctxt witness in + let* messages = Node_context.get_messages node_ctxt witness in let*? hist = Inbox.payloads_history_of_all_messages messages in return hist end diff --git a/src/proto_019_PtParisB/lib_sc_rollup_node/inbox.ml b/src/proto_019_PtParisB/lib_sc_rollup_node/inbox.ml index 84f92273d386..f2df23cdf653 100644 --- a/src/proto_019_PtParisB/lib_sc_rollup_node/inbox.ml +++ b/src/proto_019_PtParisB/lib_sc_rollup_node/inbox.ml @@ -168,7 +168,7 @@ let process_messages (node_ctxt : _ Node_context.t) ~is_first_block Node_context.save_messages node_ctxt witness_hash - ~predecessor:predecessor.hash + ~level:(Int32.succ predecessor.level) messages_with_protocol_internal_messages in let inbox = Sc_rollup_proto_types.Inbox.to_octez inbox in @@ -207,11 +207,7 @@ let process_head (node_ctxt : _ Node_context.t) ~(predecessor : Layer1.header) Octez_smart_rollup.Inbox.Skip_list.content inbox.old_levels_messages in let* () = - Node_context.save_messages - node_ctxt - witness - ~predecessor:predecessor.hash - [] + Node_context.save_messages node_ctxt witness ~level:head.level [] in let* inbox_hash = Node_context.save_inbox node_ctxt inbox in return (inbox_hash, inbox, witness, []) diff --git a/src/proto_019_PtParisB/lib_sc_rollup_node/refutation_game_helpers.ml b/src/proto_019_PtParisB/lib_sc_rollup_node/refutation_game_helpers.ml index 7ea4b3650dc3..c5f45bfb210c 100644 --- a/src/proto_019_PtParisB/lib_sc_rollup_node/refutation_game_helpers.ml +++ b/src/proto_019_PtParisB/lib_sc_rollup_node/refutation_game_helpers.ml @@ -229,7 +229,7 @@ let generate_proof (node_ctxt : _ Node_context.t) ~error:(Format.kasprintf Stdlib.failwith "%a" pp_print_trace)) @@ let open Lwt_result_syntax in - let* messages = Messages.get node_ctxt witness in + let* messages = Node_context.get_messages node_ctxt witness in let*? 
hist = Inbox.payloads_history_of_all_messages messages in return hist end diff --git a/src/proto_020_PsParisC/lib_sc_rollup_node/inbox.ml b/src/proto_020_PsParisC/lib_sc_rollup_node/inbox.ml index 84f92273d386..f2df23cdf653 100644 --- a/src/proto_020_PsParisC/lib_sc_rollup_node/inbox.ml +++ b/src/proto_020_PsParisC/lib_sc_rollup_node/inbox.ml @@ -168,7 +168,7 @@ let process_messages (node_ctxt : _ Node_context.t) ~is_first_block Node_context.save_messages node_ctxt witness_hash - ~predecessor:predecessor.hash + ~level:(Int32.succ predecessor.level) messages_with_protocol_internal_messages in let inbox = Sc_rollup_proto_types.Inbox.to_octez inbox in @@ -207,11 +207,7 @@ let process_head (node_ctxt : _ Node_context.t) ~(predecessor : Layer1.header) Octez_smart_rollup.Inbox.Skip_list.content inbox.old_levels_messages in let* () = - Node_context.save_messages - node_ctxt - witness - ~predecessor:predecessor.hash - [] + Node_context.save_messages node_ctxt witness ~level:head.level [] in let* inbox_hash = Node_context.save_inbox node_ctxt inbox in return (inbox_hash, inbox, witness, []) diff --git a/src/proto_020_PsParisC/lib_sc_rollup_node/refutation_game_helpers.ml b/src/proto_020_PsParisC/lib_sc_rollup_node/refutation_game_helpers.ml index 7ea4b3650dc3..c5f45bfb210c 100644 --- a/src/proto_020_PsParisC/lib_sc_rollup_node/refutation_game_helpers.ml +++ b/src/proto_020_PsParisC/lib_sc_rollup_node/refutation_game_helpers.ml @@ -229,7 +229,7 @@ let generate_proof (node_ctxt : _ Node_context.t) ~error:(Format.kasprintf Stdlib.failwith "%a" pp_print_trace)) @@ let open Lwt_result_syntax in - let* messages = Messages.get node_ctxt witness in + let* messages = Node_context.get_messages node_ctxt witness in let*? hist = Inbox.payloads_history_of_all_messages messages in return hist end diff --git a/src/proto_021_PtQenaB1/lib_sc_rollup_node/inbox.ml b/src/proto_021_PtQenaB1/lib_sc_rollup_node/inbox.ml index 84f92273d386..f2df23cdf653 100644 --- a/src/proto_021_PtQenaB1/lib_sc_rollup_node/inbox.ml +++ b/src/proto_021_PtQenaB1/lib_sc_rollup_node/inbox.ml @@ -168,7 +168,7 @@ let process_messages (node_ctxt : _ Node_context.t) ~is_first_block Node_context.save_messages node_ctxt witness_hash - ~predecessor:predecessor.hash + ~level:(Int32.succ predecessor.level) messages_with_protocol_internal_messages in let inbox = Sc_rollup_proto_types.Inbox.to_octez inbox in @@ -207,11 +207,7 @@ let process_head (node_ctxt : _ Node_context.t) ~(predecessor : Layer1.header) Octez_smart_rollup.Inbox.Skip_list.content inbox.old_levels_messages in let* () = - Node_context.save_messages - node_ctxt - witness - ~predecessor:predecessor.hash - [] + Node_context.save_messages node_ctxt witness ~level:head.level [] in let* inbox_hash = Node_context.save_inbox node_ctxt inbox in return (inbox_hash, inbox, witness, []) diff --git a/src/proto_021_PtQenaB1/lib_sc_rollup_node/refutation_game_helpers.ml b/src/proto_021_PtQenaB1/lib_sc_rollup_node/refutation_game_helpers.ml index 7ea4b3650dc3..c5f45bfb210c 100644 --- a/src/proto_021_PtQenaB1/lib_sc_rollup_node/refutation_game_helpers.ml +++ b/src/proto_021_PtQenaB1/lib_sc_rollup_node/refutation_game_helpers.ml @@ -229,7 +229,7 @@ let generate_proof (node_ctxt : _ Node_context.t) ~error:(Format.kasprintf Stdlib.failwith "%a" pp_print_trace)) @@ let open Lwt_result_syntax in - let* messages = Messages.get node_ctxt witness in + let* messages = Node_context.get_messages node_ctxt witness in let*? 
hist = Inbox.payloads_history_of_all_messages messages in return hist end diff --git a/src/proto_alpha/lib_sc_rollup_node/inbox.ml b/src/proto_alpha/lib_sc_rollup_node/inbox.ml index 84f92273d386..f2df23cdf653 100644 --- a/src/proto_alpha/lib_sc_rollup_node/inbox.ml +++ b/src/proto_alpha/lib_sc_rollup_node/inbox.ml @@ -168,7 +168,7 @@ let process_messages (node_ctxt : _ Node_context.t) ~is_first_block Node_context.save_messages node_ctxt witness_hash - ~predecessor:predecessor.hash + ~level:(Int32.succ predecessor.level) messages_with_protocol_internal_messages in let inbox = Sc_rollup_proto_types.Inbox.to_octez inbox in @@ -207,11 +207,7 @@ let process_head (node_ctxt : _ Node_context.t) ~(predecessor : Layer1.header) Octez_smart_rollup.Inbox.Skip_list.content inbox.old_levels_messages in let* () = - Node_context.save_messages - node_ctxt - witness - ~predecessor:predecessor.hash - [] + Node_context.save_messages node_ctxt witness ~level:head.level [] in let* inbox_hash = Node_context.save_inbox node_ctxt inbox in return (inbox_hash, inbox, witness, []) diff --git a/src/proto_alpha/lib_sc_rollup_node/refutation_game_helpers.ml b/src/proto_alpha/lib_sc_rollup_node/refutation_game_helpers.ml index 7ea4b3650dc3..c5f45bfb210c 100644 --- a/src/proto_alpha/lib_sc_rollup_node/refutation_game_helpers.ml +++ b/src/proto_alpha/lib_sc_rollup_node/refutation_game_helpers.ml @@ -229,7 +229,7 @@ let generate_proof (node_ctxt : _ Node_context.t) ~error:(Format.kasprintf Stdlib.failwith "%a" pp_print_trace)) @@ let open Lwt_result_syntax in - let* messages = Messages.get node_ctxt witness in + let* messages = Node_context.get_messages node_ctxt witness in let*? hist = Inbox.payloads_history_of_all_messages messages in return hist end -- GitLab From 39eaae1cf06b454101f82a2a8396e0d7d9dbedc3 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Mon, 23 Sep 2024 11:16:11 +0200 Subject: [PATCH 18/21] Rollup node: snapshots with SQLite store --- src/lib_smart_rollup_node/snapshots.ml | 25 ++++++++++++---------- src/lib_smart_rollup_node/sql_store.ml | 28 ++++++++++++++++++++++++- src/lib_smart_rollup_node/sql_store.mli | 10 +++++++++ 3 files changed, 51 insertions(+), 12 deletions(-) diff --git a/src/lib_smart_rollup_node/snapshots.ml b/src/lib_smart_rollup_node/snapshots.ml index c7a430b47669..06ce699d7fa3 100644 --- a/src/lib_smart_rollup_node/snapshots.ml +++ b/src/lib_smart_rollup_node/snapshots.ml @@ -655,11 +655,9 @@ let post_export_checks ~snapshot_file = snapshot_header ~dest -let operator_local_file_regexp = Re.Str.regexp "^storage/lpc$" - let snapshotable_files_regexp = Re.Str.regexp - "^\\(storage/.*\\|context/.*\\|wasm_2_0_0/.*\\|arith/.*\\|riscv/.*\\|context/.*\\|metadata$\\)" + "^\\(storage/version\\|context/.*\\|wasm_2_0_0/.*\\|arith/.*\\|riscv/.*\\|context/.*\\|metadata$\\)" let maybe_cancel_gc ~rollup_node_endpoint = let open Lwt_syntax in @@ -751,12 +749,10 @@ let export_dir (header : Header.t) ~unlock ~compression ~data_dir ~dest | Some dest -> Filename.concat dest dest_file_name | None -> dest_file_name in - let*! () = - let open Lwt_syntax in - let* () = Option.iter_s Lwt_utils_unix.create_dir dest in + let* () = + let*! 
() = Option.iter_s Lwt_utils_unix.create_dir dest in let include_file relative_path = Re.Str.string_match snapshotable_files_regexp relative_path 0 - && not (Re.Str.string_match operator_local_file_regexp relative_path 0) in let files = Tezos_stdlib_unix.Utils.fold_files @@ -768,6 +764,10 @@ let export_dir (header : Header.t) ~unlock ~compression ~data_dir ~dest (full_path, relative_path) :: acc) [] in + Lwt_utils_unix.with_tempdir "rollup_node_sqlite_export_" @@ fun tmp_dir -> + let output_db_file = Filename.concat tmp_dir Store.sqlite_file_name in + let* () = Store.export_store ~data_dir ~output_db_file in + let files = (output_db_file, Store.sqlite_file_name) :: files in let writer = match compression with | On_the_fly -> gzip_writer @@ -807,10 +807,7 @@ let export_compact cctxt ~no_checks ~compression ~data_dir ~dest ~filename = in Lwt_utils_unix.with_tempdir "snapshot_temp_" @@ fun tmp_dir -> let tmp_context_dir = Configuration.default_context_dir tmp_dir in - let tmp_store_dir = Configuration.default_storage_dir tmp_dir in let*! () = Lwt_utils_unix.create_dir tmp_context_dir in - let*! () = Lwt_utils_unix.create_dir tmp_store_dir in - let store_dir = Configuration.default_storage_dir data_dir in let context_dir = Configuration.default_context_dir data_dir in let* store = Store.init Read_only ~data_dir in let* metadata = Metadata.read_metadata_file ~dir:data_dir in @@ -853,8 +850,10 @@ let export_compact cctxt ~no_checks ~compression ~data_dir ~dest ~filename = if Sys.file_exists path then Tezos_stdlib_unix.Utils.copy_file ~src:path ~dst:(tmp_dir // a) in - Tezos_stdlib_unix.Utils.copy_dir store_dir tmp_store_dir ; + let output_db_file = Filename.concat tmp_dir Store.sqlite_file_name in + let* () = Store.export_store ~data_dir ~output_db_file in copy_file "metadata" ; + copy_dir "storage" ; copy_dir "wasm_2_0_0" ; copy_dir "arith" ; copy_dir "riscv" ; @@ -983,6 +982,10 @@ let import ~apply_unsafe_patches ~no_checks ~force cctxt ~data_dir ~snapshot_file ~dest:data_dir in + let rm f = + try Unix.unlink f with Unix.Unix_error (Unix.ENOENT, _, _) -> () + in + List.iter rm Store.extra_sqlite_files ; let* () = maybe_reconstruct_context cctxt ~data_dir ~apply_unsafe_patches in let* () = correct_history_mode ~data_dir snapshot_header original_history_mode diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index a18ac337c8e4..c504b7f860bc 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -262,6 +262,8 @@ end let sqlite_file_name = "store.sqlite" +let extra_sqlite_files = [sqlite_file_name ^ "-wal"; sqlite_file_name ^ "-shm"] + type 'a t = Sqlite.t type rw = Store_sigs.rw t @@ -1107,6 +1109,12 @@ module State = struct @@ Format.sprintf {sql|SELECT value, level from rollup_node_state WHERE name = %S|sql} N.name + + let delete = + (unit ->. unit) + @@ Format.sprintf + {sql|DELETE from rollup_node_state WHERE name = %S|sql} + N.name end module type S = sig @@ -1157,7 +1165,8 @@ module State = struct val name : string val type_ : value Caqti_type.t - end) : S with type value := N.value * int32 = struct + end) = + struct module Q = Q (N) let set = Q.set_both N.type_ @@ -1170,6 +1179,9 @@ module State = struct let get ?conn store = with_connection store conn @@ fun conn -> Sqlite.Db.find_opt conn get () + + let delete ?conn store = + with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.delete () end module Finalized_level = Make_level (struct @@ -1247,3 +1259,17 @@ let gc store ~level = in let*! 
() = Events.(emit finish_gc) () in return_unit + +let export_store ~data_dir ~output_db_file = + let open Lwt_result_syntax in + let* store = init Read_only ~data_dir in + Sqlite.use store @@ fun conn -> + let* () = Sqlite.vacuum ~conn ~output_db_file in + (* Remove operator specific information *) + let* store = + Sqlite.init ~path:output_db_file ~perm:`Read_write (fun _ -> return_unit) + in + let* () = State.LPC.delete store in + let* () = Sqlite.use store @@ fun conn -> Sqlite.vacuum_self ~conn in + let*! () = close store in + return_unit diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index e3872acbf425..78b00a1a7760 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -17,6 +17,9 @@ type ro = Store_sigs.ro t (** Name of SQLite file in data directory. *) val sqlite_file_name : string +(** Other files that make up the store. *) +val extra_sqlite_files : string list + (** [init mode ~data_dir] initializes the store and returns it. *) val init : 'a Store_sigs.mode -> data_dir:string -> 'a t tzresult Lwt.t @@ -31,6 +34,13 @@ val readonly : _ t -> ro (by removing information from the database). *) val gc : rw -> level:int32 -> unit tzresult Lwt.t +(** [export_store ~data_dir ~output_db_file] exports the store database with + data from the [data_dir] into the [output_db_file]. This function also + removes data that is specific to the operator. This function is meant to be + used to produce snapshots. *) +val export_store : + data_dir:string -> output_db_file:string -> unit tzresult Lwt.t + (** [with_transaction store f] executes [f] with a single connection to the database in a transaction. I.e., if [f] returns an error or raises an exception, the store will not be modified. *) -- GitLab From 1d435e32db061460bd4b3198460d81be51c92fad Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Mon, 23 Sep 2024 14:44:23 +0200 Subject: [PATCH 19/21] Rollup node: Store in its own library This is to share with the EVM node which needs read access. 
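As an illustration only, a read-only consumer such as the EVM node could open
the store along the following lines. This is a sketch, not code from this
series: it reuses interfaces introduced here (Store.init,
Store.L2_blocks.find_finalized, Store.close), but the helper name and the
exact module paths assumed to be in scope are assumptions.

    open Octez_smart_rollup_node_store

    (* Hypothetical helper: read the last finalized L2 block from a rollup
       node data directory, opening the SQLite store read-only. *)
    let read_finalized_block ~rollup_node_data_dir =
      let open Lwt_result_syntax in
      let* store =
        Store.init
          Tezos_layer2_store.Store_sigs.Read_only
          ~data_dir:rollup_node_data_dir
      in
      let* finalized = Store.L2_blocks.find_finalized store in
      let*! () = Store.close store in
      return finalized

The -wal/-shm files listed in extra_sqlite_files suggest the database runs in
WAL mode, so a reader like this can coexist with the rollup node writing to
the same store.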
--- etherlink/bin_node/lib_dev/dune | 1 + manifest/product_etherlink.ml | 1 + manifest/product_octez.ml | 26 ++++++++++++++++-- manifest/product_octez.mli | 2 ++ src/lib_smart_rollup_node/configuration.ml | 2 +- src/lib_smart_rollup_node/configuration.mli | 2 +- src/lib_smart_rollup_node/dune | 30 ++++++++++++++++++--- src/lib_smart_rollup_node/sql_store.ml | 21 +++++++++------ src/lib_smart_rollup_node/sql_store.mli | 9 ++++++- src/lib_smart_rollup_node/test/dune | 2 ++ 10 files changed, 79 insertions(+), 17 deletions(-) diff --git a/etherlink/bin_node/lib_dev/dune b/etherlink/bin_node/lib_dev/dune index 4ad8f9b6cace..ef9f42391c0e 100644 --- a/etherlink/bin_node/lib_dev/dune +++ b/etherlink/bin_node/lib_dev/dune @@ -27,6 +27,7 @@ octez-smart-rollup-wasm-debugger-lib octez-l2-libs.layer2_store octez-l2-libs.smart-rollup + octez-l2-libs.octez-smart-rollup-node-lib.store octez-evm-node-libs.evm_node_migrations octez-evm-node-libs.evm_node_kernels octez-libs.prometheus-app diff --git a/manifest/product_etherlink.ml b/manifest/product_etherlink.ml index 8e03af93c0a4..e5b5fe0cc017 100644 --- a/manifest/product_etherlink.ml +++ b/manifest/product_etherlink.ml @@ -232,6 +232,7 @@ let evm_node_lib_dev = octez_scoru_wasm_debugger_lib |> open_; octez_layer2_store |> open_; octez_smart_rollup_lib |> open_; + octez_smart_rollup_node_store_lib; evm_node_migrations; evm_node_kernels; prometheus_app; diff --git a/manifest/product_octez.ml b/manifest/product_octez.ml index 08f10b34e221..f48f08f506f4 100644 --- a/manifest/product_octez.ml +++ b/manifest/product_octez.ml @@ -4676,12 +4676,34 @@ let rollup_node_sqlite_migrations = ]; ] +let octez_smart_rollup_node_store_lib_modules = + ["store_version"; "sql_store"; "store_v5"; "store"] + +let octez_smart_rollup_node_store_lib = + octez_l2_lib + "octez-smart-rollup-node-lib.store" + ~internal_name:"octez_smart_rollup_node_store" + ~path:"src/lib_smart_rollup_node" + ~synopsis:"Octez: library for accessing the store of the Smart Rollup node" + ~modules:octez_smart_rollup_node_store_lib_modules + ~deps: + [ + octez_base |> open_ ~m:"TzPervasives" |> open_; + octez_base_unix; + octez_stdlib_unix |> open_; + octez_layer2_store |> open_; + rollup_node_sqlite_migrations; + octez_sqlite |> open_; + octez_smart_rollup_lib |> open_; + ] + let octez_smart_rollup_node_lib = public_lib "octez-smart-rollup-node-lib" ~internal_name:"octez_smart_rollup_node" ~path:"src/lib_smart_rollup_node" ~synopsis:"Octez: library for Smart Rollup node" + ~all_modules_except:octez_smart_rollup_node_store_lib_modules ~deps: [ octez_base |> open_ ~m:"TzPervasives" |> open_; @@ -4700,8 +4722,7 @@ let octez_smart_rollup_node_lib = octez_injector_lib |> open_; octez_version_value |> open_; octez_layer2_store |> open_; - rollup_node_sqlite_migrations; - octez_sqlite |> open_; + octez_smart_rollup_node_store_lib |> open_; octez_crawler |> open_; octez_workers |> open_; octez_smart_rollup_lib |> open_; @@ -8190,6 +8211,7 @@ let _octez_smart_rollup_node_lib_tests = octez_test_helpers |> open_; octez_layer2_store |> open_; octez_smart_rollup_lib |> open_; + octez_smart_rollup_node_store_lib |> open_; octez_smart_rollup_node_lib |> open_; helpers |> open_; alcotezt; diff --git a/manifest/product_octez.mli b/manifest/product_octez.mli index b40c74c29d1d..81b06ff96b8d 100644 --- a/manifest/product_octez.mli +++ b/manifest/product_octez.mli @@ -63,6 +63,8 @@ val octez_signer_services : Manifest.target val octez_smart_rollup_lib : Manifest.target +val octez_smart_rollup_node_store_lib : 
Manifest.target + val octez_stdlib_unix : Manifest.target val octez_test_helpers : Manifest.target diff --git a/src/lib_smart_rollup_node/configuration.ml b/src/lib_smart_rollup_node/configuration.ml index e3c9e5824453..3e9a26288b8f 100644 --- a/src/lib_smart_rollup_node/configuration.ml +++ b/src/lib_smart_rollup_node/configuration.ml @@ -51,7 +51,7 @@ type gc_parameters = { context_splitting_period : int option; } -type history_mode = Archive | Full +type history_mode = Store.State.history_mode = Archive | Full type outbox_destination_filter = | Any_destination diff --git a/src/lib_smart_rollup_node/configuration.mli b/src/lib_smart_rollup_node/configuration.mli index 151457e0a89c..eca2bf0f38f4 100644 --- a/src/lib_smart_rollup_node/configuration.mli +++ b/src/lib_smart_rollup_node/configuration.mli @@ -76,7 +76,7 @@ type gc_parameters = { (** Number of blocks before splitting the context. *) } -type history_mode = +type history_mode = Store.State.history_mode = | Archive (** The whole history of the rollup (starting at its genesis) is kept *) | Full diff --git a/src/lib_smart_rollup_node/dune b/src/lib_smart_rollup_node/dune index 819e6ee31241..edf7ada75411 100644 --- a/src/lib_smart_rollup_node/dune +++ b/src/lib_smart_rollup_node/dune @@ -1,6 +1,28 @@ ; This file was automatically generated, do not edit. ; Edit file manifest/main.ml instead. +(library + (name octez_smart_rollup_node_store) + (public_name octez-l2-libs.octez-smart-rollup-node-lib.store) + (instrumentation (backend bisect_ppx)) + (libraries + octez-libs.base + octez-libs.base.unix + octez-libs.stdlib-unix + octez-l2-libs.layer2_store + octez-l2-libs.rollup_node_sqlite_migrations + octez-l2-libs.sqlite + octez-l2-libs.smart-rollup) + (flags + (:standard) + -open Tezos_base.TzPervasives + -open Tezos_base + -open Tezos_stdlib_unix + -open Tezos_layer2_store + -open Octez_sqlite + -open Octez_smart_rollup) + (modules store_version sql_store store_v5 store)) + (library (name octez_smart_rollup_node) (public_name octez-smart-rollup-node-lib) @@ -22,8 +44,7 @@ octez-injector octez-version.value octez-l2-libs.layer2_store - octez-l2-libs.rollup_node_sqlite_migrations - octez-l2-libs.sqlite + octez-l2-libs.octez-smart-rollup-node-lib.store octez-crawler octez-libs.tezos-workers octez-l2-libs.smart-rollup) @@ -41,7 +62,8 @@ -open Octez_injector -open Tezos_version_value -open Tezos_layer2_store - -open Octez_sqlite + -open Octez_smart_rollup_node_store -open Octez_crawler -open Tezos_workers - -open Octez_smart_rollup)) + -open Octez_smart_rollup) + (modules (:standard \ store_version sql_store store_v5 store))) diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml index c504b7f860bc..9ac3a9472339 100644 --- a/src/lib_smart_rollup_node/sql_store.ml +++ b/src/lib_smart_rollup_node/sql_store.ml @@ -182,12 +182,6 @@ module Types = struct @@ proj level (fun i -> i.Inbox.level) @@ proj history_proof (fun i -> i.Inbox.old_levels_messages) @@ proj_end - - let history_mode = - custom - ~encode:(fun h -> Ok (Configuration.string_of_history_mode h)) - ~decode:(fun s -> Ok (Configuration.history_mode_of_string s)) - string end let table_exists_req = @@ -1224,12 +1218,23 @@ module State = struct let name = "last_context_split" end) + type history_mode = Archive | Full + + let history_mode_type = + custom + ~encode:(function Archive -> Ok "archive" | Full -> Ok "full") + ~decode:(function + | "archive" -> Ok Archive + | "full" -> Ok Full + | s -> Error ("Invalid history mode: " ^ s)) + string + module 
History_mode = Make_value (struct let name = "history_mode" - type value = Configuration.history_mode + type value = history_mode - let type_ = Types.history_mode + let type_ = history_mode_type end) module L2_head = Make_both (struct diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli index 78b00a1a7760..563105f48dda 100644 --- a/src/lib_smart_rollup_node/sql_store.mli +++ b/src/lib_smart_rollup_node/sql_store.mli @@ -363,6 +363,13 @@ module State : sig val get : ?conn:Sqlite.conn -> _ t -> value option tzresult Lwt.t end + type history_mode = + | Archive + (** The whole history of the rollup (starting at its genesis) is kept *) + | Full + (** Only the history necessary to play refutation games is kept + (i.e. after the LCC only) *) + module Finalized_level : S with type value := int32 module LCC : S with type value := Commitment.Hash.t * int32 @@ -379,7 +386,7 @@ module State : sig module Last_context_split : S with type value := int32 - module History_mode : S with type value := Configuration.history_mode + module History_mode : S with type value := history_mode module L2_head : S with type value := Block_hash.t * int32 end diff --git a/src/lib_smart_rollup_node/test/dune b/src/lib_smart_rollup_node/test/dune index a6d2b20638c1..c01c77da6e9b 100644 --- a/src/lib_smart_rollup_node/test/dune +++ b/src/lib_smart_rollup_node/test/dune @@ -11,6 +11,7 @@ octez-libs.test-helpers octez-l2-libs.layer2_store octez-l2-libs.smart-rollup + octez-l2-libs.octez-smart-rollup-node-lib.store octez-smart-rollup-node-lib octez_smart_rollup_node_test_helpers octez-alcotezt) @@ -25,6 +26,7 @@ -open Tezos_test_helpers -open Tezos_layer2_store -open Octez_smart_rollup + -open Octez_smart_rollup_node_store -open Octez_smart_rollup_node -open Octez_smart_rollup_node_test_helpers -open Octez_alcotezt) -- GitLab From 27de9d51c0f7259a29da0db72f1cce057798d0c2 Mon Sep 17 00:00:00 2001 From: Alain Mebsout Date: Mon, 23 Sep 2024 15:16:39 +0200 Subject: [PATCH 20/21] Evm node: use rollup node store library --- etherlink/bin_node/lib_dev/evm_context.ml | 129 ++++--------- .../bin_node/lib_dev/rollup_node_storage.ml | 178 ------------------ 2 files changed, 39 insertions(+), 268 deletions(-) delete mode 100644 etherlink/bin_node/lib_dev/rollup_node_storage.ml diff --git a/etherlink/bin_node/lib_dev/evm_context.ml b/etherlink/bin_node/lib_dev/evm_context.ml index b55a540c5107..b659c902bd4f 100644 --- a/etherlink/bin_node/lib_dev/evm_context.ml +++ b/etherlink/bin_node/lib_dev/evm_context.ml @@ -130,11 +130,9 @@ module Request = struct | Earliest_state : (Evm_state.t option, tztrace) t | Earliest_number : (Ethereum_types.quantity option, tztrace) t | Reconstruct : { - rollup_node_data_dir : string; + rollup_node_store : [`Read] Octez_smart_rollup_node_store.Store.t; genesis_level : int32; finalized_level : int32; - levels_to_hashes : [`Read] Rollup_node_storage.Levels_to_hashes.t; - l2_blocks : [`Read] Rollup_node_storage.L2_blocks.t; } -> (unit, tztrace) t | Patch_state : { @@ -277,16 +275,13 @@ module Request = struct case (Tag 12) ~title:"Reconstruct" - (obj4 + (obj3 (req "request" (constant "reconstruct")) - (req "rollup_node_data_dir" string) (req "genesis_level" int32) (req "finalized_level" int32)) (function - | View - (Reconstruct - {rollup_node_data_dir; genesis_level; finalized_level; _}) -> - Some ((), rollup_node_data_dir, genesis_level, finalized_level) + | View (Reconstruct {genesis_level; finalized_level; _}) -> + Some ((), genesis_level, finalized_level) | _ -> 
           | _ -> None)
          (fun _ -> assert false
@@ -1108,28 +1103,28 @@ module State = struct
       return_some
         Blueprint_types.{delayed_transactions; kernel_upgrade; blueprint}
 
-  let messages_of_level ~levels_to_hashes ~l2_blocks ~messages level =
+  let messages_of_level store level =
     let open Lwt_result_syntax in
-    let open Rollup_node_storage in
-    let* hash = Levels_to_hashes.find levels_to_hashes level in
+    let open Octez_smart_rollup_node_store in
+    let* hash = Store.L2_levels.find store level in
     let*? hash =
      Option.to_result
        ~none:[error_of_fmt "Block hash is not found for level %ld" level]
        hash
     in
-    let* block = L2_blocks.read l2_blocks hash in
-    let*? _, header =
+    let* block = Store.L2_blocks.find store hash in
+    let*? block =
      Option.to_result
        ~none:[error_of_fmt "Block is not found for hash %a" Block_hash.pp hash]
        block
     in
-    let* messages = Messages.read messages header.inbox_witness in
-    let*? messages =
+    let* messages = Store.Messages.find store block.header.inbox_witness in
+    let*? _level, messages =
      Option.to_result
        ~none:[error_of_fmt "No messages found for block %a" Block_hash.pp hash]
        messages
     in
-    return (fst messages)
+    return messages
 
  (** [reconstruct_commit_blocks ~current_block_number ctxt execute evm_state]
      loops on [execute evm_state] until no new block is
@@ -1182,27 +1177,18 @@ module State = struct
       return evm_state
     else return evm_state
 
-  let reconstruct_history ctxt ~rollup_node_data_dir ~genesis_level
-      ~finalized_level ~levels_to_hashes ~l2_blocks =
+  let reconstruct_history ctxt ~rollup_node_store ~genesis_level
+      ~finalized_level =
     let open Lwt_result_syntax in
    (* Smart Rollups do not process messages of genesis level. *)
    let first_level = Int32.succ genesis_level in
    assert (finalized_level > first_level) ;
-
-    let* messages =
-      Rollup_node_storage.load_messages ~rollup_node_data_dir ()
-    in
-
-    let messages_of_level =
-      messages_of_level ~levels_to_hashes ~l2_blocks ~messages
-    in
-
    let config = pvm_config ctxt in
    let rec go ~count_progress ~current_block_number evm_state tezos_level =
      if tezos_level > finalized_level then return_unit
      else
        let*! () = count_progress 1 in
-        let* messages = messages_of_level tezos_level in
+        let* messages = messages_of_level rollup_node_store tezos_level in
        (* For now we use the mocked sol, ipl and eol of the debugger. *)
        let messages =
          match messages with
@@ -1470,23 +1456,14 @@ module Handlers = struct
        match checkpoint with
        | Some (level, _checkpoint) -> return_some level
        | None -> return_none)
-    | Reconstruct
-        {
-          rollup_node_data_dir;
-          genesis_level;
-          finalized_level;
-          levels_to_hashes;
-          l2_blocks;
-        } ->
+    | Reconstruct {rollup_node_store; genesis_level; finalized_level} ->
        protect @@ fun () ->
        let ctxt = Worker.state self in
        State.reconstruct_history
          ctxt
-          ~rollup_node_data_dir
+          ~rollup_node_store
          ~genesis_level
          ~finalized_level
-          ~levels_to_hashes
-          ~l2_blocks
    | Patch_state {commit; key; value; block_number} ->
        protect @@ fun () ->
        let ctxt = Worker.state self in
@@ -1620,48 +1597,20 @@ let rollup_node_metadata ~rollup_node_data_dir =
  | Some (V1 {rollup_address; genesis_info = {level; _}; _}) ->
      return (Address.to_string rollup_address, level)
 
-let rollup_node_block_storage ~rollup_node_data_dir =
-  let open Lwt_result_syntax in
-  let open Rollup_node_storage in
-  let* last_finalized_level, levels_to_hashes, l2_blocks =
-    Rollup_node_storage.load ~rollup_node_data_dir ()
-  in
-  let* final_level = Last_finalized_level.read last_finalized_level in
-  let*? final_level =
-    Option.to_result
-      ~none:
-        [error_of_fmt "Rollup node storage is missing the last finalized level"]
-      final_level
-  in
-  return (final_level, levels_to_hashes, l2_blocks)
-
 let init_context_from_rollup_node ~data_dir ~rollup_node_data_dir =
   let open Lwt_result_syntax in
-  let open Rollup_node_storage in
-  let* final_level, levels_to_hashes, l2_blocks =
-    rollup_node_block_storage ~rollup_node_data_dir
-  in
-  let* final_level_hash = Levels_to_hashes.find levels_to_hashes final_level in
-  let*? final_level_hash =
-    Option.to_result
-      ~none:
-        [
-          error_of_fmt
-            "Rollup node has no block hash for the l1 level %ld"
-            final_level;
-        ]
-      final_level_hash
+  let open Octez_smart_rollup_node_store in
+  let* rollup_node_store =
+    Store.init Read_only ~data_dir:rollup_node_data_dir
   in
-  let* final_l2_block = L2_blocks.read l2_blocks final_level_hash in
-  let* checkpoint =
+  let* final_l2_block = Store.L2_blocks.find_finalized rollup_node_store in
+  let* final_l2_block =
     match final_l2_block with
-    | Some Sc_rollup_block.(_, {context; _}) ->
-        Irmin_context.hash_of_context_hash context |> return
-    | None ->
-        failwith
-          "Rollup node has no l2 blocks for the l1 block hash %a"
-          Block_hash.pp
-          final_level_hash
+    | Some b -> return b
+    | None -> failwith "Rollup node has no finalized l2 block"
+  in
+  let checkpoint =
+    Irmin_context.hash_of_context_hash final_l2_block.header.context
   in
   let rollup_node_context_dir =
     Filename.Infix.(rollup_node_data_dir // "context")
@@ -1680,7 +1629,7 @@ let init_context_from_rollup_node ~data_dir ~rollup_node_data_dir =
     let message =
       Format.sprintf
         "Exporting context for %ld in %s"
-        final_level
+        final_l2_block.header.level
        evm_context_dir
     in
     Tezos_stdlib_unix.Utils.copy_dir
@@ -1696,7 +1645,7 @@ let init_context_from_rollup_node ~data_dir ~rollup_node_data_dir =
    Irmin_context.checkout_exn evm_node_index checkpoint
  in
  let*! evm_state = Irmin_context.PVMState.get evm_node_context in
-  return (evm_node_context, evm_state, final_level)
+  return (evm_node_context, evm_state, final_l2_block.header.level)
 
 let init_store_from_rollup_node ~data_dir ~evm_state ~irmin_context =
   let open Lwt_result_syntax in
@@ -1787,8 +1736,15 @@ let apply_evm_events ?finalized_level events =
 let reconstruct ~data_dir ~rollup_node_data_dir ~boot_sector =
   let open Lwt_result_syntax in
   let* () = lock_data_dir ~data_dir in
-  let* finalized_level, levels_to_hashes, l2_blocks =
-    rollup_node_block_storage ~rollup_node_data_dir
+  let open Octez_smart_rollup_node_store in
+  let* rollup_node_store =
+    Store.init Read_only ~data_dir:rollup_node_data_dir
+  in
+  let* finalized_level = Store.State.Finalized_level.get rollup_node_store in
+  let* finalized_level =
+    match finalized_level with
+    | Some l -> return l
+    | None -> failwith "Rollup node has no finalized l2 block"
   in
   let* smart_rollup_address, genesis_level =
     rollup_node_metadata ~rollup_node_data_dir
@@ -1806,14 +1762,7 @@ let reconstruct ~data_dir ~rollup_node_data_dir ~boot_sector =
     ()
   in
   worker_wait_for_request
-    (Reconstruct
-       {
-         rollup_node_data_dir;
-         genesis_level;
-         finalized_level;
-         levels_to_hashes;
-         l2_blocks;
-       })
+    (Reconstruct {rollup_node_store; genesis_level; finalized_level})
 
 let init_from_rollup_node ~omit_delayed_tx_events ~data_dir
     ~rollup_node_data_dir () =
diff --git a/etherlink/bin_node/lib_dev/rollup_node_storage.ml b/etherlink/bin_node/lib_dev/rollup_node_storage.ml
deleted file mode 100644
index 962aa7dd4cfd..000000000000
--- a/etherlink/bin_node/lib_dev/rollup_node_storage.ml
+++ /dev/null
@@ -1,178 +0,0 @@
-(*****************************************************************************)
-(*                                                                           *)
-(* SPDX-License-Identifier: MIT                                              *)
-(* Copyright (c) 2024 Nomadic Labs                                           *)
-(*                                                                           *)
-(*****************************************************************************)
-
-(* This module provides a subset of the rollup node storage. It could
-   be factorised with existing rollup node storage module if
-   needed/more used. *)
-
-module Last_finalized_level = Indexed_store.Make_singleton (struct
-  type t = int32
-
-  let name = "finalized_level"
-
-  let encoding = Data_encoding.int32
-end)
-
-module Block_key = struct
-  include Block_hash
-
-  let hash_size = 31
-
-  let t =
-    let open Repr in
-    map
-      (bytes_of (`Fixed hash_size))
-      (fun b -> Block_hash.of_bytes_exn b)
-      (fun bh -> Block_hash.to_bytes bh)
-
-  let encode bh = Block_hash.to_string bh
-
-  let encoded_size = Block_hash.size
-
-  let decode str off =
-    let str = String.sub str off encoded_size in
-    Block_hash.of_string_exn str
-
-  let pp = Block_hash.pp
-end
-
-module L2_blocks =
-  Indexed_store.Make_indexed_file
-    (struct
-      let name = "l2_blocks"
-    end)
-    (Block_key)
-    (struct
-      type t = (unit, unit) Sc_rollup_block.block
-
-      let name = "sc_rollup_block_info"
-
-      let encoding =
-        Sc_rollup_block.block_encoding Data_encoding.unit Data_encoding.unit
-
-      module Header = struct
-        type t = Sc_rollup_block.header
-
-        let name = "sc_rollup_block_header"
-
-        let encoding = Sc_rollup_block.header_encoding
-
-        let fixed_size = Sc_rollup_block.header_size
-      end
-    end)
-
-module Levels_to_hashes =
-  Indexed_store.Make_indexable
-    (struct
-      let name = "tezos_levels"
-    end)
-    (Indexed_store.Make_index_key (struct
-      type t = int32
-
-      let encoding = Data_encoding.int32
-
-      let name = "level"
-
-      let fixed_size = 4
-
-      let equal = Int32.equal
-    end))
-    (Block_key)
-
-module Make_hash_index_key (H : Tezos_crypto.Intfs.HASH) =
-Indexed_store.Make_index_key (struct
-  include Indexed_store.Make_fixed_encodable (H)
-
-  let equal = H.equal
-end)
-
-module Messages =
-  Indexed_store.Make_indexed_file
-    (struct
-      let name = "messages"
-    end)
-    (Make_hash_index_key (Merkelized_payload_hashes_hash))
-    (struct
-      type t = string list
-
-      let name = "messages_list"
-
-      let encoding = Data_encoding.(list @@ dynamic_size (Variable.string' Hex))
-
-      module Header = struct
-        type t = Block_hash.t
-
-        let name = "messages_block"
-
-        let encoding = Block_hash.encoding
-
-        let fixed_size =
-          WithExceptions.Option.get ~loc:__LOC__
-          @@ Data_encoding.Binary.fixed_length encoding
-      end
-    end)
-
-type history_mode = Archive | Full
-
-module History_mode = Indexed_store.Make_singleton (struct
-  type t = history_mode
-
-  let name = "history_mode"
-
-  let encoding =
-    Data_encoding.string_enum [("archive", Archive); ("full", Full)]
-end)
-
-(** [load ~rollup_node_data_dir] load the needed storage from the
-    rollup node: last_finalized_level, levels_to_hashes, and
-    l2_blocks. default [index_buffer_size] is [10_000] an
-    [cache_size] is [300_000]. They are the same value as for the
-    rollup node. *)
-let load ?(index_buffer_size = 10_000) ?(cache_size = 300_000)
-    ~rollup_node_data_dir () =
-  let open Lwt_result_syntax in
-  let store = Filename.Infix.(rollup_node_data_dir // "storage") in
-  let* last_finalized_level =
-    Last_finalized_level.load
-      Read_only
-      ~path:(Filename.concat store "last_finalized_level")
-  in
-  let* levels_to_hashes =
-    Levels_to_hashes.load
-      Read_only
-      ~index_buffer_size
-      ~path:(Filename.concat store "levels_to_hashes")
-  in
-  let* l2_blocks =
-    L2_blocks.load
-      Read_only
-      ~index_buffer_size
-      ~cache_size
-      ~path:(Filename.concat store "l2_blocks")
-  in
-  return (last_finalized_level, levels_to_hashes, l2_blocks)
-
-let load_messages ?(index_buffer_size = 10_000) ?(cache_size = 300_000)
-    ~rollup_node_data_dir () =
-  let open Lwt_result_syntax in
-  let store = Filename.Infix.(rollup_node_data_dir // "storage") in
-  let path name = Filename.concat store name in
-  (* We need an archive mode to reconstruct the history. *)
-  let* () =
-    let* history_mode_store =
-      History_mode.load Read_only ~path:(path "history_mode")
-    in
-    let* history_mode = History_mode.read history_mode_store in
-    match history_mode with
-    | Some Archive -> return_unit
-    | Some Full ->
-        failwith
-          "The history cannot be reconstructed from a full rollup node, it \
-           needs an archive one."
-    | None -> failwith "The history mode of the rollup node cannot be found."
-  in
-  Messages.load ~index_buffer_size ~cache_size Read_only ~path:(path "messages")
--
GitLab

From 5cf04b80e7021c17408bd3c33f39a1936cb33d80 Mon Sep 17 00:00:00 2001
From: Alain Mebsout
Date: Mon, 23 Sep 2024 17:18:01 +0200
Subject: [PATCH 21/21] Rollup node/store: register finalized block hash in addition to level

---
 etherlink/bin_node/lib_dev/evm_context.ml       | 6 +++---
 src/lib_smart_rollup_node/node_context.ml       | 8 ++++----
 src/lib_smart_rollup_node/node_context.mli      | 8 ++++----
 src/lib_smart_rollup_node/rollup_node_daemon.ml | 7 ++++---
 src/lib_smart_rollup_node/sql_store.ml          | 8 ++++++--
 src/lib_smart_rollup_node/sql_store.mli         | 2 +-
 6 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/etherlink/bin_node/lib_dev/evm_context.ml b/etherlink/bin_node/lib_dev/evm_context.ml
index b659c902bd4f..f413fa17d5bc 100644
--- a/etherlink/bin_node/lib_dev/evm_context.ml
+++ b/etherlink/bin_node/lib_dev/evm_context.ml
@@ -1740,10 +1740,10 @@ let reconstruct ~data_dir ~rollup_node_data_dir ~boot_sector =
   let* rollup_node_store =
     Store.init Read_only ~data_dir:rollup_node_data_dir
   in
-  let* finalized_level = Store.State.Finalized_level.get rollup_node_store in
+  let* finalized = Store.State.Finalized_level.get rollup_node_store in
   let* finalized_level =
-    match finalized_level with
-    | Some l -> return l
+    match finalized with
+    | Some (_block, l) -> return l
     | None -> failwith "Rollup node has no finalized l2 block"
   in
   let* smart_rollup_address, genesis_level =
diff --git a/src/lib_smart_rollup_node/node_context.ml b/src/lib_smart_rollup_node/node_context.ml
index 7f2853344c63..9deb49a2d8a5 100644
--- a/src/lib_smart_rollup_node/node_context.ml
+++ b/src/lib_smart_rollup_node/node_context.ml
@@ -286,13 +286,13 @@ let is_processed {store; _} head =
 let last_processed_head_opt {store; _} = Store.L2_blocks.find_head store
 
-let mark_finalized_level {store; _} level =
-  Store.State.Finalized_level.set store level
+let set_finalized {store; _} hash level =
+  Store.State.Finalized_level.set store (hash, level)
 
 let get_finalized_level {store; _} =
   let open Lwt_result_syntax in
-  let+ level = Store.State.Finalized_level.get store in
-  Option.value level ~default:0l
+  let+ f = Store.State.Finalized_level.get store in
+  match f with None -> 0l | Some (_h, l) -> l
 
 let find_l2_block {store; _} block_hash = Store.L2_blocks.find store block_hash
diff --git a/src/lib_smart_rollup_node/node_context.mli b/src/lib_smart_rollup_node/node_context.mli
index db92360b1cae..1fe449a731d1 100644
--- a/src/lib_smart_rollup_node/node_context.mli
+++ b/src/lib_smart_rollup_node/node_context.mli
@@ -267,10 +267,10 @@ val set_l2_head : rw -> Sc_rollup_block.t -> unit tzresult Lwt.t
     exists. *)
 val last_processed_head_opt : _ t -> Sc_rollup_block.t option tzresult Lwt.t
 
-(** [mark_finalized_head store head] remembers that the [head] is finalized. By
-    construction, every block whose level is smaller than [head]'s is also
-    finalized. *)
-val mark_finalized_level : rw -> int32 -> unit tzresult Lwt.t
+(** [set_finalized store hash level] remembers that the block with [hash]
+    at [level] is finalized. By construction, every block whose level is smaller
+    than [level] is also finalized. *)
+val set_finalized : rw -> Block_hash.t -> int32 -> unit tzresult Lwt.t
 
 (** [get_finalized_level t] returns the last finalized level. *)
 val get_finalized_level : _ t -> int32 tzresult Lwt.t
diff --git a/src/lib_smart_rollup_node/rollup_node_daemon.ml b/src/lib_smart_rollup_node/rollup_node_daemon.ml
index 3ef920c4b1d9..76a97cba0715 100644
--- a/src/lib_smart_rollup_node/rollup_node_daemon.ml
+++ b/src/lib_smart_rollup_node/rollup_node_daemon.ml
@@ -207,9 +207,10 @@ let process_unseen_head ({node_ctxt; _} as state) ~catching_up ~predecessor
       Sc_rollup_block.{header; content = (); num_ticks; initial_tick}
     in
     let* () =
-      Node_context.mark_finalized_level
-        node_ctxt
-        Int32.(sub head.level (of_int node_ctxt.block_finality_time))
+      assert (node_ctxt.block_finality_time = 2) ;
+      let finalized_level = Int32.pred predecessor.header.level in
+      let finalized_hash = predecessor.header.predecessor in
+      Node_context.set_finalized node_ctxt finalized_hash finalized_level
     in
     let* () = Node_context.save_l2_block node_ctxt l2_block in
     let end_timestamp = Time.System.now () in
diff --git a/src/lib_smart_rollup_node/sql_store.ml b/src/lib_smart_rollup_node/sql_store.ml
index 9ac3a9472339..a576a6c491ed 100644
--- a/src/lib_smart_rollup_node/sql_store.ml
+++ b/src/lib_smart_rollup_node/sql_store.ml
@@ -961,7 +961,7 @@ module L2_blocks = struct
        b.inbox_hash, b.initial_tick, b.num_ticks
      FROM l2_blocks AS b
      INNER JOIN rollup_node_state as s
-      ON s.name = "finalized_level" AND s.level = b.level
+      ON s.name = "finalized_level" AND s.value = b.block_hash
    |sql}
 
  let select_level_and_predecessor =
@@ -1178,8 +1178,12 @@ module State = struct
       with_connection store conn @@ fun conn -> Sqlite.Db.exec conn Q.delete ()
   end
 
-  module Finalized_level = Make_level (struct
+  module Finalized_level = Make_both (struct
     let name = "finalized_level"
+
+    type value = Block_hash.t
+
+    let type_ = Types.block_hash
   end)
 
   module LCC = Make_both (struct
diff --git a/src/lib_smart_rollup_node/sql_store.mli b/src/lib_smart_rollup_node/sql_store.mli
index 563105f48dda..734ccf769a09 100644
--- a/src/lib_smart_rollup_node/sql_store.mli
+++ b/src/lib_smart_rollup_node/sql_store.mli
@@ -370,7 +370,7 @@ module State : sig
         (** Only the history necessary to play refutation games is kept
            (i.e. after the LCC only) *)
 
-  module Finalized_level : S with type value := int32
+  module Finalized_level : S with type value := Block_hash.t * int32
 
   module LCC : S with type value := Commitment.Hash.t * int32
--
GitLab