From 70eecd969cfd8c4fd728fe68898c0395762b0ed1 Mon Sep 17 00:00:00 2001
From: Eugen Zalinescu
Date: Thu, 6 Feb 2025 12:57:40 +0100
Subject: [PATCH 1/3] DAL/Node: store the node's last finalized level in the
 context

---
 src/bin_dal_node/daemon.ml        |  2 ++
 src/bin_dal_node/node_context.ml  | 11 ++++++++++-
 src/bin_dal_node/node_context.mli |  8 ++++++++
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/src/bin_dal_node/daemon.ml b/src/bin_dal_node/daemon.ml
index 5961b8e45db2..ed21b2f47044 100644
--- a/src/bin_dal_node/daemon.ml
+++ b/src/bin_dal_node/daemon.ml
       | None -> Lwt.fail_with "L1 crawler lib shut down"
       | Some (_finalized_hash, finalized_shell_header) ->
           let level = finalized_shell_header.level in
+          let () = Node_context.set_last_finalized_level ctxt level in
           (* At each potential published_level [level], we prefetch the
              committee for its corresponding attestation_level (that is:
              level + attestation_lag - 1). This is in particular used by GS
           gs_worker
           transport_layer
           cctxt
+          ~last_finalized_level:head_level
       in
       let* () =
         match Profile_manager.get_profiles profile_ctxt with
diff --git a/src/bin_dal_node/node_context.ml b/src/bin_dal_node/node_context.ml
index e8a47fe1c5c5..19f2c0c62445 100644
--- a/src/bin_dal_node/node_context.ml
+++ b/src/bin_dal_node/node_context.ml
   gs_worker : Gossipsub.Worker.t;
   transport_layer : Gossipsub.Transport_layer.t;
   mutable profile_ctxt : Profile_manager.t;
+  mutable last_finalized_level : int32;
+      (* The highest finalized level the DAL node is aware of (except at
+         start-up, when it is the highest level the node is aware of). *)
 }

 let init config profile_ctxt cryptobox shards_proofs_precomputation
-    proto_parameters proto_plugins store gs_worker transport_layer cctxt =
+    proto_parameters proto_plugins store gs_worker transport_layer cctxt
+    ~last_finalized_level =
   let neighbors_cctxts =
     List.map
       (fun Configuration_file.{addr; port} ->
     gs_worker;
     transport_layer;
     profile_ctxt;
+    last_finalized_level;
   }

 let may_reconstruct ~reconstruct slot_id t =
 let get_cryptobox ctxt = ctxt.cryptobox

 let get_proto_parameters ctxt = ctxt.proto_parameters

+let set_last_finalized_level ctxt level = ctxt.last_finalized_level <- level
+
+let get_last_finalized_level ctxt = ctxt.last_finalized_level
+
 let get_shards_proofs_precomputation ctxt = ctxt.shards_proofs_precomputation

 let get_store ctxt = ctxt.store
diff --git a/src/bin_dal_node/node_context.mli b/src/bin_dal_node/node_context.mli
index 34b83a9955c4..d16d6fc43c57 100644
--- a/src/bin_dal_node/node_context.mli
+++ b/src/bin_dal_node/node_context.mli
   Gossipsub.Worker.t ->
   Gossipsub.Transport_layer.t ->
   Tezos_rpc.Context.generic ->
+  last_finalized_level:int32 ->
   t

 (** Returns all the registered plugins *)
 val get_cryptobox : t -> Cryptobox.t

 (** [get_proto_parameters ctxt] returns the DAL node's current protocol
     parameters. *)
 val get_proto_parameters : t -> Dal_plugin.proto_parameters

+(** Update the node's last finalized level. *)
+val set_last_finalized_level : t -> int32 -> unit
+
+(** Get the node's last finalized level. This level may be equal to or higher
+    than the node's last processed level. *)
+val get_last_finalized_level : t -> int32
+
 (** [get_shards_proofs_precomputation ctxt] returns the shards proof's
     precomputation. *)
 val get_shards_proofs_precomputation :
   t -> Cryptobox.shards_proofs_precomputation option
--
GitLab
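
Note: the context change above reduces to a mutable record field with a getter
and a setter, initialized from the head level at start-up and then updated by
the L1 crawler. A minimal standalone OCaml sketch of that pattern; the real
[t] carries many more fields and [init] many more parameters, and the literal
levels below are made up for illustration:

  (* Sketch only: field and accessor names match the patch. *)
  type t = {mutable last_finalized_level : int32}

  let init ~last_finalized_level = {last_finalized_level}

  let set_last_finalized_level ctxt level = ctxt.last_finalized_level <- level

  let get_last_finalized_level ctxt = ctxt.last_finalized_level

  let () =
    (* At start-up the field holds the head level; the crawler then bumps it
       on each newly finalized block. *)
    let ctxt = init ~last_finalized_level:100l in
    set_last_finalized_level ctxt 101l ;
    assert (get_last_finalized_level ctxt = 101l)
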
From 0dc6edf406fa932b9f0dba56c798828afc915df4 Mon Sep 17 00:00:00 2001
From: Eugen Zalinescu
Date: Thu, 6 Feb 2025 13:06:21 +0100
Subject: [PATCH 2/3] DAL/Node: use last_finalized_level instead of
 last_processed_level in warning

More exactly, in the warning about the DAL node lagging behind the L1 node.

---
 src/bin_dal_node/RPC_server.ml | 48 ++++++++++++++--------------------
 1 file changed, 19 insertions(+), 29 deletions(-)

diff --git a/src/bin_dal_node/RPC_server.ml b/src/bin_dal_node/RPC_server.ml
index d0b2b1170cfd..4d7815813d3d 100644
--- a/src/bin_dal_node/RPC_server.ml
+++ b/src/bin_dal_node/RPC_server.ml
       in
       Lwt.return_unit

-  let warn_if_lagging store ~attestation_level =
-    let open Lwt_result_syntax in
-    let*! last_processed_level =
-      let last_processed_level_store = Store.last_processed_level store in
-      Store.Last_processed_level.load last_processed_level_store
-    in
-    match last_processed_level with
-    | Ok (Some lpl) ->
-        (* The L1 node's level is at least [current_level = lpl + 2], because the
-           DAL node processes blocks with a delay of two levels, to be sure that
-           processed blocks are final. *)
-        let current_level = Int32.add lpl 2l in
-        (* The baker's current level is the same as its L1 node and is the one
-           of the latest seen proposal (ie block). The baker asks for slots'
-           status when it has seen a proposal at [attestation_level - 1]. *)
-        let current_baker_level = Int32.sub attestation_level 1l in
-        (* We check that the baker is not in advance wrt the DAL node, which would
-           mean that the DAL node is lagging. We allow a slack of 1 level. *)
-        if Int32.succ current_level < current_baker_level then
-          Event.emit_get_attestable_slots_future_level_warning
-            ~current_level
-            ~current_baker_level
-        else Lwt.return_unit
-    | _ ->
-        (* We simply don't do anything if we couldn't obtain the
-           [last_processed_level]. This should not happen though. *)
-        Lwt.return_unit
+  let warn_if_lagging ~last_finalized_level ~attestation_level =
+    (* The L1 node's level is at least [last_finalized_level + 2], because the
+       DAL node processes blocks with a delay of two levels, to be sure that
+       processed blocks are final. *)
+    let current_level = Int32.add last_finalized_level 2l in
+    (* The baker's current level is the same as its L1 node's and is the level
+       of the latest seen proposal (i.e., block). The baker asks for the slots'
+       status when it has seen a proposal at [attestation_level - 1]. *)
+    let current_baker_level = Int32.sub attestation_level 1l in
+    (* We check that the baker is not ahead of the DAL node, which would
+       mean that the DAL node is lagging. We allow a slack of 1 level. *)
+    if Int32.succ current_level < current_baker_level then
+      Event.emit_get_attestable_slots_future_level_warning
+        ~current_level
+        ~current_baker_level
+    else Lwt.return_unit

   let is_slot_attestable_with_traps shards_store traps_fraction pkh
       assigned_shard_indexes slot_id =
       in
       call_handler1 (fun () ->
           let open Lwt_result_syntax in
-          let store = Node_context.get_store ctxt in
+          let last_finalized_level = Node_context.get_last_finalized_level ctxt in
           let attestation_level = Int32.pred attested_level in
-          let*! () = warn_if_lagging store ~attestation_level in
+          let*! () = warn_if_lagging ~last_finalized_level ~attestation_level in
           (* For retrieving the assigned shard indexes, we consider the committee
              at [attestation_level], because the (DAL) attestations in the blocks
              at level [attested_level] refer to the predecessor level. *)
           let* shard_indices =
             Node_context.fetch_assigned_shard_indices
               ctxt
               ~pkh
               ~level:attestation_level
             |> Errors.other_lwt_result
           in
+          let store = Node_context.get_store ctxt in
           let proto_parameters = Node_context.get_proto_parameters ctxt in
           get_attestable_slots
             ~shard_indices
--
GitLab
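
Note: the level arithmetic of the new [warn_if_lagging] can be checked in
isolation. A standalone sketch, with a [Printf] call standing in for the
Lwt-based [Event.emit_get_attestable_slots_future_level_warning] so that it
compiles without any Tezos libraries; the levels in the driver are invented
for illustration:

  let warn_if_lagging ~last_finalized_level ~attestation_level =
    (* The L1 node is at least two levels ahead of the last finalized level. *)
    let current_level = Int32.add last_finalized_level 2l in
    (* The baker has seen a proposal at [attestation_level - 1]. *)
    let current_baker_level = Int32.sub attestation_level 1l in
    (* Warn only when the baker is more than one level ahead (slack of 1). *)
    if Int32.succ current_level < current_baker_level then
      Printf.printf
        "DAL node lagging: node at %ld, baker at %ld\n"
        current_level
        current_baker_level

  let () =
    (* last_finalized_level = 100 gives current_level = 102. A baker at level
       103 (attestation_level = 104) is within the slack: no warning. *)
    warn_if_lagging ~last_finalized_level:100l ~attestation_level:104l ;
    (* A baker at level 105 (attestation_level = 106) triggers the warning. *)
    warn_if_lagging ~last_finalized_level:100l ~attestation_level:106l
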
From 6af407029aaaf9fd1ccd005a23c8c571cbbf99a2 Mon Sep 17 00:00:00 2001
From: Eugen Zalinescu
Date: Thu, 6 Feb 2025 13:47:20 +0100
Subject: [PATCH 3/3] DAL/Node: use last_finalized_level instead of
 last_processed_level in another warning

More exactly, in the warning about a registered pkh not being a delegate.

---
 src/bin_dal_node/daemon.ml        |  5 +----
 src/bin_dal_node/node_context.ml  | 36 +++++++++++--------------------
 src/bin_dal_node/node_context.mli |  6 ++----
 3 files changed, 15 insertions(+), 32 deletions(-)

diff --git a/src/bin_dal_node/daemon.ml b/src/bin_dal_node/daemon.ml
index ed21b2f47044..dd27d32ef22b 100644
--- a/src/bin_dal_node/daemon.ml
+++ b/src/bin_dal_node/daemon.ml
       let* () =
         match Profile_manager.get_profiles profile_ctxt with
         | Operator profile ->
-            Node_context.warn_if_attesters_not_delegates
-              ctxt
-              ~level:head_level
-              profile
+            Node_context.warn_if_attesters_not_delegates ctxt profile
         | _ -> return_unit
       in
       Gossipsub.Worker.Validate_message_hook.set
diff --git a/src/bin_dal_node/node_context.ml b/src/bin_dal_node/node_context.ml
index 19f2c0c62445..308268113b97 100644
--- a/src/bin_dal_node/node_context.ml
+++ b/src/bin_dal_node/node_context.ml
   let network_name = config.Configuration_file.network_name in
   Types.Version.make ~network_version:(Gossipsub.version ~network_name)

-(* TODO: https://gitlab.com/tezos/tezos/-/issues/7706
-   This level argument would not be needed if we had the head level in the context. *)
-let warn_if_attesters_not_delegates ctxt ?level operator_profiles =
+let warn_if_attesters_not_delegates ctxt operator_profiles =
   let open Lwt_result_syntax in
   let pkh_set = Operator_profile.attesters operator_profiles in
   if Signature.Public_key_hash.Set.is_empty pkh_set then return_unit
   else
-    let* level_opt =
-      match level with
-      | Some _ -> return level
-      | None ->
-          let store = get_store ctxt in
-          let lpl_store = Store.last_processed_level store in
-          Store.Last_processed_level.load lpl_store
-    in
-    Option.iter_es
-      (fun level ->
-        let cctxt = get_tezos_node_cctxt ctxt in
-        let*? (module Plugin) = get_plugin_for_level ctxt ~level in
-        Signature.Public_key_hash.Set.iter_es
-          (fun pkh ->
-            let* is_delegate = Plugin.is_delegate cctxt ~pkh in
-            if not is_delegate then
-              let*! () = Event.emit_registered_pkh_not_a_delegate ~pkh in
-              return_unit
-            else return_unit)
-          pkh_set)
-      level_opt
+    let level = get_last_finalized_level ctxt in
+    let cctxt = get_tezos_node_cctxt ctxt in
+    let*? (module Plugin) = get_plugin_for_level ctxt ~level in
+    Signature.Public_key_hash.Set.iter_es
+      (fun pkh ->
+        let* is_delegate = Plugin.is_delegate cctxt ~pkh in
+        if not is_delegate then
+          let*! () = Event.emit_registered_pkh_not_a_delegate ~pkh in
+          return_unit
+        else return_unit)
+      pkh_set

 module P2P = struct
   let connect {transport_layer; _} ?timeout point =
diff --git a/src/bin_dal_node/node_context.mli b/src/bin_dal_node/node_context.mli
index d16d6fc43c57..5116a7feed9a 100644
--- a/src/bin_dal_node/node_context.mli
+++ b/src/bin_dal_node/node_context.mli
 val fetch_committee :

 val version : t -> Types.Version.t

 (** Emit a warning for each public key hash in the given operator profile (if
-    any) that is not that of a L1-registered delegate. The optional [level]
-    argument is used to specify for which level to obtain the plugin; if not
-    given the last process level is used (if found in the store). *)
+    any) that is not that of an L1-registered delegate. *)
 val warn_if_attesters_not_delegates :
-  t -> ?level:int32 -> Operator_profile.t -> unit tzresult Lwt.t
+  t -> Operator_profile.t -> unit tzresult Lwt.t

 (** Module for P2P-related accessors. *)
 module P2P : sig
--
GitLab
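
Note: after this patch, [warn_if_attesters_not_delegates] is a plain iteration
over the attester set at a single, always-available level, instead of the
optional-level lookup through the store. A simplified standalone sketch of the
control flow, using strings in place of [Signature.Public_key_hash.t] and a
stub in place of [Plugin.is_delegate] (which, in the node, queries L1 via the
plugin for the last finalized level); the pkh values are invented:

  module PkhSet = Set.Make (String)

  (* Stub for [Plugin.is_delegate]: every pkh except the marked one is
     treated as a registered delegate. *)
  let is_delegate pkh = not (String.equal pkh "tz1_unregistered")

  let warn_if_attesters_not_delegates pkh_set =
    (* Iterating over an empty set is a no-op, so no emptiness check is
       needed in this sketch. *)
    PkhSet.iter
      (fun pkh ->
        if not (is_delegate pkh) then
          Printf.printf "registered pkh %s is not a delegate\n" pkh)
      pkh_set

  let () =
    warn_if_attesters_not_delegates
      (PkhSet.of_list ["tz1_some_delegate"; "tz1_unregistered"])
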