From ba9b6915c887984355080d50bec76d5c086e07ce Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 20 Nov 2025 15:28:01 +0100 Subject: [PATCH 1/7] MIR/big_map: Change name fromLazyStorage to FromId --- contrib/mir/src/ast.rs | 2 +- contrib/mir/src/ast/big_map.rs | 37 +++++++++---------- contrib/mir/src/interpreter.rs | 28 ++++++-------- contrib/mir/src/typechecker.rs | 8 ++-- .../tezos_execution/src/mir_ctx.rs | 8 ++-- 5 files changed, 37 insertions(+), 46 deletions(-) diff --git a/contrib/mir/src/ast.rs b/contrib/mir/src/ast.rs index 969308abe067..4149386a3c49 100644 --- a/contrib/mir/src/ast.rs +++ b/contrib/mir/src/ast.rs @@ -376,7 +376,7 @@ impl<'a> IntoMicheline<'a> for TypedValue<'a> { m.into_iter() .map(|(key, val)| V::prim2(arena, Prim::Elt, go(key), go(val))), )), - big_map::BigMapContent::FromLazyStorage(m) => { + big_map::BigMapContent::FromId(m) => { let id_part = V::Int(m.id.value.into()); let overlay_empty = m.overlay.is_empty(); let map_part = V::Seq(V::alloc_iter( diff --git a/contrib/mir/src/ast/big_map.rs b/contrib/mir/src/ast/big_map.rs index e014bc3a88bf..498ab87a0e07 100644 --- a/contrib/mir/src/ast/big_map.rs +++ b/contrib/mir/src/ast/big_map.rs @@ -67,7 +67,7 @@ impl BigMapId { /// lazy storage, and the in-memory overlay that carries a diff from /// the map in the lazy storage. #[derive(Debug, Clone, Eq, PartialEq)] -pub struct BigMapFromLazyStorage<'a> { +pub struct BigMapFromId<'a> { /// Id of the big map in the lazy storage. pub id: BigMapId, @@ -90,7 +90,7 @@ pub enum BigMapContent<'a> { InMemory(BTreeMap, TypedValue<'a>>), /// Otherwise they come from the lazy storage and have both an /// identifier and an overlay - FromLazyStorage(BigMapFromLazyStorage<'a>), + FromId(BigMapFromId<'a>), } /// Represents a big_map value. 
@@ -134,7 +134,7 @@ impl<'a> BigMap<'a> { ) -> Result>, LazyStorageError> { Ok(match &self.content { BigMapContent::InMemory(m) => m.get(key).cloned(), - BigMapContent::FromLazyStorage(BigMapFromLazyStorage { id, overlay }) => { + BigMapContent::FromId(BigMapFromId { id, overlay }) => { match overlay.get(key) { // If the key is mentioned in the overlay, the associated value is // always used, even if it is `None` (and `get` returned @@ -154,7 +154,7 @@ impl<'a> BigMap<'a> { ) -> Result { Ok(match &self.content { BigMapContent::InMemory(m) => m.get(key).is_some(), - BigMapContent::FromLazyStorage(BigMapFromLazyStorage { id, overlay }) => { + BigMapContent::FromId(BigMapFromId { id, overlay }) => { match overlay.get(key) { // If the key is mentioned in the overlay, the associated value is // always used, even if it is `None` (and `get` returned @@ -177,7 +177,7 @@ impl<'a> BigMap<'a> { m.remove(&key); } }, - BigMapContent::FromLazyStorage(BigMapFromLazyStorage { id: _, overlay }) => { + BigMapContent::FromId(BigMapFromId { id: _, overlay }) => { overlay.insert(key, value); } } @@ -187,9 +187,7 @@ impl<'a> BigMap<'a> { pub fn len_for_gas(&self) -> usize { match &self.content { BigMapContent::InMemory(m) => m.len(), - BigMapContent::FromLazyStorage(BigMapFromLazyStorage { id: _, overlay }) => { - overlay.len() - } + BigMapContent::FromId(BigMapFromId { id: _, overlay }) => overlay.len(), } } } @@ -534,7 +532,7 @@ mod test_big_map_operations { storage .big_map_update(&map_id, TypedValue::int(2), Some(TypedValue::int(2))) .unwrap(); - let content = BigMapContent::FromLazyStorage(BigMapFromLazyStorage { + let content = BigMapContent::FromId(BigMapFromId { id: map_id, overlay: BTreeMap::from([ (TypedValue::int(1), Some(TypedValue::int(-1))), @@ -649,7 +647,7 @@ impl<'a> TypedValue<'a> { /// identifiers. 
pub fn view_big_map_ids(&mut self, out: &mut Vec) { self.collect_big_maps(&mut |m| { - if let BigMapContent::FromLazyStorage(content) = &m.content { + if let BigMapContent::FromId(content) = &m.content { out.push(content.id.clone()) } }); @@ -701,14 +699,13 @@ pub fn dump_big_map_updates<'a>( // de-facto copied, so the vector will usually stay empty and produce no // allocations. type NonEmpty = (T, Vec); - let mut grouped_maps: BTreeMap> = - BTreeMap::new(); + let mut grouped_maps: BTreeMap> = BTreeMap::new(); for map in finished_with_maps { // the "map" variable has type (&mut &mut BigMap<'_>), the // following assignment casts it to as single &mut let map: &mut BigMap<'_> = map; match map.content { - BigMapContent::FromLazyStorage(ref mut m) => { + BigMapContent::FromId(ref mut m) => { // Insert to grouped_maps match grouped_maps.entry(m.id.clone()) { Entry::Vacant(e) => { @@ -727,7 +724,7 @@ pub fn dump_big_map_updates<'a>( .into_iter() .map(|(key, value)| (key, Some(value))), )?; - map.content = BigMapContent::FromLazyStorage(BigMapFromLazyStorage { + map.content = BigMapContent::FromId(BigMapFromId { id, overlay: BTreeMap::new(), }) @@ -769,7 +766,7 @@ mod test_big_map_to_storage_update { fn check_is_dumped_map(map: BigMap, id: BigMapId) { match map.content { BigMapContent::InMemory(_) => panic!("Big map has not been dumped"), - BigMapContent::FromLazyStorage(map) => { + BigMapContent::FromId(map) => { assert_eq!((map.id, map.overlay), (id, BTreeMap::new())) } }; @@ -816,7 +813,7 @@ mod test_big_map_to_storage_update { storage .big_map_update(&map_id, TypedValue::int(1), Some(TypedValue::int(1))) .unwrap(); - let content = BigMapContent::FromLazyStorage(BigMapFromLazyStorage { + let content = BigMapContent::FromId(BigMapFromId { id: map_id, overlay: BTreeMap::from([ (TypedValue::int(0), None), @@ -854,7 +851,7 @@ mod test_big_map_to_storage_update { let storage = &mut InMemoryLazyStorage::new(); let map_id1 = storage.big_map_new(&Type::Int, 
&Type::Int).unwrap(); let map_id2 = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); - let content = BigMapContent::FromLazyStorage(BigMapFromLazyStorage { + let content = BigMapContent::FromId(BigMapFromId { id: map_id1.clone(), overlay: BTreeMap::from([(TypedValue::int(11), Some(TypedValue::int(11)))]), }); @@ -863,7 +860,7 @@ mod test_big_map_to_storage_update { key_type: Type::Int, value_type: Type::Int, }; - let content = BigMapContent::FromLazyStorage(BigMapFromLazyStorage { + let content = BigMapContent::FromId(BigMapFromId { id: map_id1, overlay: BTreeMap::from([(TypedValue::int(12), Some(TypedValue::int(12)))]), }); @@ -872,7 +869,7 @@ mod test_big_map_to_storage_update { key_type: Type::Int, value_type: Type::Int, }; - let content = BigMapContent::FromLazyStorage(BigMapFromLazyStorage { + let content = BigMapContent::FromId(BigMapFromId { id: map_id2, overlay: BTreeMap::from([(TypedValue::int(2), Some(TypedValue::int(2)))]), }); @@ -929,7 +926,7 @@ mod test_big_map_to_storage_update { storage .big_map_update(&map_id2, TypedValue::int(0), Some(TypedValue::int(0))) .unwrap(); - let content = BigMapContent::FromLazyStorage(BigMapFromLazyStorage { + let content = BigMapContent::FromId(BigMapFromId { id: map_id1.clone(), overlay: BTreeMap::from([(TypedValue::int(1), Some(TypedValue::int(1)))]), }); diff --git a/contrib/mir/src/interpreter.rs b/contrib/mir/src/interpreter.rs index 13eed9f8f09a..c553715f6cde 100644 --- a/contrib/mir/src/interpreter.rs +++ b/contrib/mir/src/interpreter.rs @@ -3659,7 +3659,7 @@ mod interpreter_tests { Some(TypedValue::String("foo".to_owned())), ) .unwrap(); - let content = big_map::BigMapContent::FromLazyStorage(big_map::BigMapFromLazyStorage { + let content = big_map::BigMapContent::FromId(big_map::BigMapFromId { id: big_map_id, overlay: BTreeMap::from([( TypedValue::int(2), @@ -3852,7 +3852,7 @@ mod interpreter_tests { ], ) .unwrap(); - let content = big_map::BigMapContent::FromLazyStorage(big_map::BigMapFromLazyStorage { + 
let content = big_map::BigMapContent::FromId(big_map::BigMapFromId { id: big_map_id, overlay: BTreeMap::new(), }); @@ -4295,7 +4295,7 @@ mod interpreter_tests { ctx.big_map_storage .big_map_bulk_update(&id, content) .unwrap(); - let content = big_map::BigMapContent::FromLazyStorage(big_map::BigMapFromLazyStorage { + let content = big_map::BigMapContent::FromId(big_map::BigMapFromId { id: id.clone(), overlay: overlay.into_iter().collect(), }); @@ -4316,12 +4316,10 @@ mod interpreter_tests { assert_eq!( stack, stk![TypedValue::BigMap(BigMap { - content: big_map::BigMapContent::FromLazyStorage( - big_map::BigMapFromLazyStorage { - id, - overlay: result.into_iter().collect() - } - ), + content: big_map::BigMapContent::FromId(big_map::BigMapFromId { + id, + overlay: result.into_iter().collect() + }), key_type: Type::Int, value_type: Type::String, })] @@ -4383,7 +4381,7 @@ mod interpreter_tests { ctx.big_map_storage .big_map_bulk_update(&id, content) .unwrap(); - let content = big_map::BigMapContent::FromLazyStorage(big_map::BigMapFromLazyStorage { + let content = big_map::BigMapContent::FromId(big_map::BigMapFromId { id: id.clone(), overlay: overlay.into_iter().collect(), }); @@ -4409,12 +4407,10 @@ mod interpreter_tests { stack, stk![ TypedValue::BigMap(BigMap { - content: big_map::BigMapContent::FromLazyStorage( - big_map::BigMapFromLazyStorage { - id, - overlay: result.into_iter().collect() - } - ), + content: big_map::BigMapContent::FromId(big_map::BigMapFromId { + id, + overlay: result.into_iter().collect() + }), key_type: Type::Int, value_type: Type::String, }), diff --git a/contrib/mir/src/typechecker.rs b/contrib/mir/src/typechecker.rs index 23cf84914131..6c3fab52d3a8 100644 --- a/contrib/mir/src/typechecker.rs +++ b/contrib/mir/src/typechecker.rs @@ -2512,7 +2512,7 @@ pub(crate) fn typecheck_value<'a>( } else { BTreeMap::default() }; - let content = big_map::BigMapContent::FromLazyStorage(big_map::BigMapFromLazyStorage { + let content = 
big_map::BigMapContent::FromId(big_map::BigMapFromId { id: big_map_id, overlay, }); @@ -6227,7 +6227,7 @@ mod typecheck_tests { &Type::new_big_map(Type::Int, Type::Int) ), Ok(TypedValue::BigMap(BigMap { - content: big_map::BigMapContent::FromLazyStorage(big_map::BigMapFromLazyStorage { + content: big_map::BigMapContent::FromId(big_map::BigMapFromId { id: id0.clone(), overlay: BTreeMap::new() }), @@ -6319,7 +6319,7 @@ mod typecheck_tests { &Type::new_big_map(Type::Int, Type::Int) ), Ok(TypedValue::BigMap(BigMap { - content: big_map::BigMapContent::FromLazyStorage(big_map::BigMapFromLazyStorage { + content: big_map::BigMapContent::FromId(big_map::BigMapFromId { id: id0.clone(), overlay: BTreeMap::from([(TypedValue::int(7), Some(TypedValue::int(8)))]) }), @@ -6336,7 +6336,7 @@ mod typecheck_tests { &Type::new_big_map(Type::Int, Type::Int) ), Ok(TypedValue::BigMap(BigMap { - content: big_map::BigMapContent::FromLazyStorage(big_map::BigMapFromLazyStorage { + content: big_map::BigMapContent::FromId(big_map::BigMapFromId { id: id0, overlay: BTreeMap::from([(TypedValue::int(7), None)]) }), diff --git a/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs b/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs index c8d1c5cb2662..db9e873a1b48 100644 --- a/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs +++ b/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs @@ -616,9 +616,7 @@ impl<'a, Host: Runtime> LazyStorage<'a> for TcCtx<'a, Host> { mod tests { use super::*; use crate::gas::TezlinkOperationGas; - use mir::ast::big_map::{ - dump_big_map_updates, BigMap, BigMapContent, BigMapFromLazyStorage, - }; + use mir::ast::big_map::{dump_big_map_updates, BigMap, BigMapContent, BigMapFromId}; use std::collections::BTreeMap; use tezos_evm_runtime::runtime::MockKernelHost; @@ -638,7 +636,7 @@ mod tests { fn check_is_dumped_map(map: BigMap, id: BigMapId) { match map.content { BigMapContent::InMemory(_) => panic!("Big map has not been dumped"), - 
BigMapContent::FromLazyStorage(map) => { + BigMapContent::FromId(map) => { assert_eq!((map.id, map.overlay), (id, BTreeMap::new())) } }; @@ -888,7 +886,7 @@ mod tests { storage .big_map_update(&map_id2, TypedValue::int(0), Some(TypedValue::int(0))) .unwrap(); - let content_diff = BigMapContent::FromLazyStorage(BigMapFromLazyStorage { + let content_diff = BigMapContent::FromId(BigMapFromId { id: map_id1.clone(), overlay: BTreeMap::from([(TypedValue::int(1), Some(TypedValue::int(1)))]), }); -- GitLab From eb308618e32d7800a7ef4d9dd425b930ec4369ea Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 20 Nov 2025 15:35:39 +0100 Subject: [PATCH 2/7] MIR/big_map: Introduce temporary id in BigMapId and use it for copy and allocation --- contrib/mir/src/ast/big_map.rs | 69 +++++++++++++++---- contrib/mir/src/interpreter.rs | 8 +-- contrib/mir/src/typechecker.rs | 2 +- .../tezos_execution/src/mir_ctx.rs | 19 +++-- 4 files changed, 72 insertions(+), 26 deletions(-) diff --git a/contrib/mir/src/ast/big_map.rs b/contrib/mir/src/ast/big_map.rs index 498ab87a0e07..a35a378f5c7b 100644 --- a/contrib/mir/src/ast/big_map.rs +++ b/contrib/mir/src/ast/big_map.rs @@ -4,7 +4,7 @@ //! `big_map` typed representation and utilities for working with `big_map`s. 
-use num_bigint::BigInt; +use num_bigint::{BigInt, Sign}; use num_traits::One; use std::{ collections::{btree_map::Entry, BTreeMap}, @@ -53,11 +53,20 @@ impl BigMapId { /// Get successor of the id pub fn succ(&self) -> Self { let Zarith(ref int_value) = self.value; - let result = int_value + BigInt::one(); + let result = if self.is_temporary() { + int_value - BigInt::one() + } else { + int_value + BigInt::one() + }; BigMapId { value: Zarith(result), } } + + /// Tells if a big_map id is temporary + pub fn is_temporary(&self) -> bool { + self.value.0.sign() == Sign::Minus + } } /// Represents the content of a big_map value in the case it is @@ -271,13 +280,18 @@ pub trait LazyStorage<'a> { &mut self, key_type: &Type, value_type: &Type, + temporary: bool, ) -> Result; /// Allocate a new big map, filling it with the contents from another map /// in the lazy storage. /// /// The specified big map id must point to a valid map in the lazy storage. - fn big_map_copy(&mut self, id: &BigMapId) -> Result; + fn big_map_copy( + &mut self, + id: &BigMapId, + temporary: bool, + ) -> Result; /// Remove a big map. 
/// @@ -335,6 +349,7 @@ impl<'a> MapInfo<'a> { #[derive(Clone, Debug, PartialEq, Eq)] pub struct InMemoryLazyStorage<'a> { next_id: BigMapId, + next_temp_id: BigMapId, big_maps: BTreeMap>, } @@ -343,6 +358,7 @@ impl<'a> InMemoryLazyStorage<'a> { pub fn new() -> Self { InMemoryLazyStorage { next_id: 0.into(), + next_temp_id: (-1).into(), big_maps: BTreeMap::new(), } } @@ -354,7 +370,11 @@ impl<'a> InMemoryLazyStorage<'a> { .max() .map(|id| id.succ()) .unwrap_or_else(|| 0.into()); - InMemoryLazyStorage { next_id, big_maps } + InMemoryLazyStorage { + next_id, + next_temp_id: (-1).into(), + big_maps, + } } fn get_next_id(&mut self) -> BigMapId { @@ -363,6 +383,12 @@ impl<'a> InMemoryLazyStorage<'a> { id } + fn get_next_temp_id(&mut self) -> BigMapId { + let id = self.next_temp_id.clone(); + self.next_temp_id = self.next_temp_id.succ(); + id + } + pub(crate) fn big_map_get_type( &mut self, id: &BigMapId, @@ -432,8 +458,13 @@ impl<'a> LazyStorage<'a> for InMemoryLazyStorage<'a> { &mut self, key_type: &Type, value_type: &Type, + temporary: bool, ) -> Result { - let id = self.get_next_id(); + let id = if temporary { + self.get_next_temp_id() + } else { + self.get_next_id() + }; self.big_maps.insert( id.clone(), MapInfo { @@ -450,8 +481,16 @@ impl<'a> LazyStorage<'a> for InMemoryLazyStorage<'a> { Ok(()) } - fn big_map_copy(&mut self, copied_id: &BigMapId) -> Result { - let id = self.get_next_id(); + fn big_map_copy( + &mut self, + copied_id: &BigMapId, + temporary: bool, + ) -> Result { + let id = if temporary { + self.get_next_temp_id() + } else { + self.get_next_id() + }; let info = self.access_big_map(copied_id)?.clone(); self.big_maps.insert(id.clone(), info); Ok(id) @@ -522,7 +561,7 @@ mod test_big_map_operations { fn test_get_mem_backed_by_storage() { let arena = &Arena::new(); let storage = &mut InMemoryLazyStorage::new(); - let map_id = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); + let map_id = storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); 
storage .big_map_update(&map_id, TypedValue::int(0), Some(TypedValue::int(0))) .unwrap(); @@ -717,7 +756,7 @@ pub fn dump_big_map_updates<'a>( BigMapContent::InMemory(ref mut m) => { // The entire big map is still in memory. We have to // create a new map in the storage. - let id = storage.big_map_new(&map.key_type, &map.value_type)?; + let id = storage.big_map_new(&map.key_type, &map.value_type, false)?; storage.big_map_bulk_update( &id, mem::take(m) @@ -745,7 +784,7 @@ pub fn dump_big_map_updates<'a>( // If there are any big maps with duplicate ID, we first copy them in // the storage. for map in other_maps { - let new_id = storage.big_map_copy(&id)?; + let new_id = storage.big_map_copy(&id, false)?; storage.big_map_bulk_update(&new_id, mem::take(&mut map.overlay))?; map.id = new_id } @@ -806,7 +845,7 @@ mod test_big_map_to_storage_update { #[test] fn test_map_updates_to_storage() { let storage = &mut InMemoryLazyStorage::new(); - let map_id = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); + let map_id = storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); storage .big_map_update(&map_id, TypedValue::int(0), Some(TypedValue::int(0))) .unwrap(); @@ -849,8 +888,8 @@ mod test_big_map_to_storage_update { #[test] fn test_duplicate_ids() { let storage = &mut InMemoryLazyStorage::new(); - let map_id1 = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); - let map_id2 = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); + let map_id1 = storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); + let map_id2 = storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); let content = BigMapContent::FromId(BigMapFromId { id: map_id1.clone(), overlay: BTreeMap::from([(TypedValue::int(11), Some(TypedValue::int(11)))]), @@ -918,11 +957,11 @@ mod test_big_map_to_storage_update { #[test] fn test_remove_ids() { let storage = &mut InMemoryLazyStorage::new(); - let map_id1 = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); + let map_id1 = 
storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); storage .big_map_update(&map_id1, TypedValue::int(0), Some(TypedValue::int(0))) .unwrap(); - let map_id2 = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); + let map_id2 = storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); storage .big_map_update(&map_id2, TypedValue::int(0), Some(TypedValue::int(0))) .unwrap(); diff --git a/contrib/mir/src/interpreter.rs b/contrib/mir/src/interpreter.rs index c553715f6cde..082f9cda50ca 100644 --- a/contrib/mir/src/interpreter.rs +++ b/contrib/mir/src/interpreter.rs @@ -3650,7 +3650,7 @@ mod interpreter_tests { ctx.big_map_storage = InMemoryLazyStorage::new(); let big_map_id = ctx .big_map_storage - .big_map_new(&Type::Int, &Type::String) + .big_map_new(&Type::Int, &Type::String, false) .unwrap(); ctx.big_map_storage .big_map_update( @@ -3835,7 +3835,7 @@ mod interpreter_tests { let mut ctx = Ctx::default(); let big_map_id = ctx .big_map_storage - .big_map_new(&Type::Int, &Type::String) + .big_map_new(&Type::Int, &Type::String, false) .unwrap(); ctx.big_map_storage .big_map_bulk_update( @@ -4290,7 +4290,7 @@ mod interpreter_tests { let mut ctx = Ctx::default(); let id = ctx .big_map_storage - .big_map_new(&Type::Int, &Type::String) + .big_map_new(&Type::Int, &Type::String, false) .unwrap(); ctx.big_map_storage .big_map_bulk_update(&id, content) @@ -4376,7 +4376,7 @@ mod interpreter_tests { let mut ctx = Ctx::default(); let id = ctx .big_map_storage - .big_map_new(&Type::Int, &Type::String) + .big_map_new(&Type::Int, &Type::String, false) .unwrap(); ctx.big_map_storage .big_map_bulk_update(&id, content) diff --git a/contrib/mir/src/typechecker.rs b/contrib/mir/src/typechecker.rs index 6c3fab52d3a8..91377a9f162f 100644 --- a/contrib/mir/src/typechecker.rs +++ b/contrib/mir/src/typechecker.rs @@ -6214,7 +6214,7 @@ mod typecheck_tests { fn test_parsing_big_map_value() { let mut ctx = Ctx::default(); let storage = &mut ctx.big_map_storage; - let id0 = 
storage.big_map_new(&Type::Int, &Type::Int).unwrap(); + let id0 = storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); storage .big_map_update(&id0, TypedValue::int(5), Some(TypedValue::int(5))) .unwrap(); diff --git a/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs b/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs index db9e873a1b48..0208940e387d 100644 --- a/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs +++ b/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs @@ -540,6 +540,7 @@ impl<'a, Host: Runtime> LazyStorage<'a> for TcCtx<'a, Host> { &mut self, key_type: &Type, value_type: &Type, + _temporary: bool, ) -> Result { let arena = Arena::new(); let next_id_path = next_id_path(self.context)?; @@ -561,7 +562,11 @@ impl<'a, Host: Runtime> LazyStorage<'a> for TcCtx<'a, Host> { Ok(id) } - fn big_map_copy(&mut self, id: &BigMapId) -> Result { + fn big_map_copy( + &mut self, + id: &BigMapId, + _temporary: bool, + ) -> Result { let next_id_path = next_id_path(self.context)?; let dest_id: BigMapId = read_nom_value(self.host, &next_id_path) .map_err(|e| LazyStorageError::NomReadError(e.to_string()))?; @@ -736,7 +741,9 @@ mod tests { fn test_map_updates_to_storage() { let mut host = MockKernelHost::default(); make_default_ctx!(storage, &mut host, &Context::init_context()); - let map_id = storage.big_map_new(&Type::Int, &Type::String).unwrap(); + let map_id = storage + .big_map_new(&Type::Int, &Type::String, false) + .unwrap(); storage .big_map_update( &map_id, @@ -826,7 +833,7 @@ mod tests { check_is_dumped_map(map, 0.into()); let copied_id = storage - .big_map_copy(&0.into()) + .big_map_copy(&0.into(), false) .expect("Failed to copy big_map in storage"); assert_eq!(copied_id, 1.into()); @@ -848,7 +855,7 @@ mod tests { make_default_ctx!(storage, &mut host, &Context::init_context()); let key_type = Type::Int; let value_type = Type::Int; - let map_id = storage.big_map_new(&key_type, &value_type).unwrap(); + let map_id = 
storage.big_map_new(&key_type, &value_type, false).unwrap(); let key = TypedValue::int(0); let value = TypedValue::int(0); storage @@ -878,11 +885,11 @@ mod tests { fn test_remove_with_dump() { let mut host = MockKernelHost::default(); make_default_ctx!(storage, &mut host, &Context::init_context()); - let map_id1 = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); + let map_id1 = storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); storage .big_map_update(&map_id1, TypedValue::int(0), Some(TypedValue::int(0))) .unwrap(); - let map_id2 = storage.big_map_new(&Type::Int, &Type::Int).unwrap(); + let map_id2 = storage.big_map_new(&Type::Int, &Type::Int, false).unwrap(); storage .big_map_update(&map_id2, TypedValue::int(0), Some(TypedValue::int(0))) .unwrap(); -- GitLab From 99e5e9c0b031d37d66c0d21fba37679a896b8e7a Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 20 Nov 2025 17:01:47 +0100 Subject: [PATCH 3/7] Tezlink/Kernel: Introduce a new function to generate temporary ids in the kernel --- .../kernel_latest/tezos_execution/src/lib.rs | 3 +- .../tezos_execution/src/mir_ctx.rs | 37 ++++++++++++------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/etherlink/kernel_latest/tezos_execution/src/lib.rs b/etherlink/kernel_latest/tezos_execution/src/lib.rs index db3b018e71ea..6119645c90ca 100644 --- a/etherlink/kernel_latest/tezos_execution/src/lib.rs +++ b/etherlink/kernel_latest/tezos_execution/src/lib.rs @@ -8,7 +8,7 @@ use context::Context; use mir::ast::{AddressHash, Entrypoint, OperationInfo, TransferTokens, TypedValue}; use mir::context::TypecheckingCtx; use mir::{ - ast::{IntoMicheline, Micheline}, + ast::{big_map::BigMapId, IntoMicheline, Micheline}, context::CtxTrait, gas::Gas, parser::Parser, @@ -898,6 +898,7 @@ fn apply_operation( context, gas: &mut gas, big_map_diff: &mut BTreeMap::new(), + next_temporary_id: BigMapId { value: (-1).into() }, }; let parser = Parser::new(); match &validated_operation.content.operation { diff --git 
a/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs b/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs index 0208940e387d..7ccae6c98b3b 100644 --- a/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs +++ b/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs @@ -43,6 +43,7 @@ pub struct TcCtx<'operation, Host: Runtime> { pub context: &'operation Context, pub gas: &'operation mut crate::gas::TezlinkOperationGas, pub big_map_diff: &'operation mut BTreeMap, + pub next_temporary_id: BigMapId, } pub struct OperationCtx<'operation> { @@ -329,6 +330,21 @@ impl TcCtx<'_, Host> { }), ); } + + fn generate_id(&mut self, temporary: bool) -> Result { + if temporary { + let new_id = self.next_temporary_id.clone(); + self.next_temporary_id = new_id.succ(); + Ok(new_id) + } else { + let next_id_path = next_id_path(self.context)?; + let id: BigMapId = + read_nom_value(self.host, &next_id_path).unwrap_or(0.into()); + store_bin(&id.succ(), self.host, &next_id_path) + .map_err(|e| LazyStorageError::BinWriteError(e.to_string()))?; + Ok(id) + } + } } /// Function to retrieve the hash of a TypedValue. 
@@ -540,11 +556,10 @@ impl<'a, Host: Runtime> LazyStorage<'a> for TcCtx<'a, Host> { &mut self, key_type: &Type, value_type: &Type, - _temporary: bool, + temporary: bool, ) -> Result { let arena = Arena::new(); - let next_id_path = next_id_path(self.context)?; - let id: BigMapId = read_nom_value(self.host, &next_id_path).unwrap_or(0.into()); + let id = self.generate_id(temporary)?; let key_type_path = key_type_path(self.context, &id)?; let value_type_path = value_type_path(self.context, &id)?; let key_type_encoded = key_type.into_micheline_optimized_legacy(&arena).encode(); @@ -554,8 +569,6 @@ impl<'a, Host: Runtime> LazyStorage<'a> for TcCtx<'a, Host> { .store_write_all(&value_type_path, &value_type_encoded)?; self.host .store_write_all(&key_type_path, &key_type_encoded)?; - store_bin(&id.succ(), self.host, &next_id_path) - .map_err(|e| LazyStorageError::BinWriteError(e.to_string()))?; // Write in the diff that there was an allocation self.big_map_diff_alloc(id.value.clone(), key_type_encoded, value_type_encoded); @@ -565,11 +578,9 @@ impl<'a, Host: Runtime> LazyStorage<'a> for TcCtx<'a, Host> { fn big_map_copy( &mut self, id: &BigMapId, - _temporary: bool, + temporary: bool, ) -> Result { - let next_id_path = next_id_path(self.context)?; - let dest_id: BigMapId = read_nom_value(self.host, &next_id_path) - .map_err(|e| LazyStorageError::NomReadError(e.to_string()))?; + let dest_id = self.generate_id(temporary)?; // Retrieve the path of the key_type let src_key_type_path = key_type_path(self.context, id)?; @@ -591,9 +602,6 @@ impl<'a, Host: Runtime> LazyStorage<'a> for TcCtx<'a, Host> { // Copy the content of the big_map BigMapKeys::copy_keys_in_storage(self.host, self.context, id, &dest_id)?; - store_bin(&dest_id.succ(), self.host, &next_id_path) - .map_err(|e| LazyStorageError::BinWriteError(e.to_string()))?; - // Write in the diff that there was a copy self.big_map_diff_copy(dest_id.value.clone(), id.value.clone()); Ok(dest_id) @@ -621,7 +629,9 @@ impl<'a, Host: 
Runtime> LazyStorage<'a> for TcCtx<'a, Host> { mod tests { use super::*; use crate::gas::TezlinkOperationGas; - use mir::ast::big_map::{dump_big_map_updates, BigMap, BigMapContent, BigMapFromId}; + use mir::ast::big_map::{ + dump_big_map_updates, BigMap, BigMapContent, BigMapFromId, BigMapId, + }; use std::collections::BTreeMap; use tezos_evm_runtime::runtime::MockKernelHost; @@ -633,6 +643,7 @@ mod tests { context: $context, gas: &mut gas, big_map_diff: &mut BTreeMap::new(), + next_temporary_id: BigMapId { value: (-1).into() }, }; }; } -- GitLab From 7dde307fafc8cb4b9cf650c0da479a4562ab8525 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 20 Nov 2025 15:47:19 +0100 Subject: [PATCH 4/7] MIR/big_map: Introduce the temporary boolean to have a distinction between temporary and definitive result in the lazy storage --- contrib/mir/src/ast/big_map.rs | 23 +++++++--- contrib/mir/src/interpreter.rs | 45 ++++++++++++------- .../kernel_latest/tezos_execution/src/lib.rs | 2 +- .../tezos_execution/src/mir_ctx.rs | 5 ++- 4 files changed, 49 insertions(+), 26 deletions(-) diff --git a/contrib/mir/src/ast/big_map.rs b/contrib/mir/src/ast/big_map.rs index a35a378f5c7b..2cb9492bc8b7 100644 --- a/contrib/mir/src/ast/big_map.rs +++ b/contrib/mir/src/ast/big_map.rs @@ -704,6 +704,7 @@ pub fn dump_big_map_updates<'a>( storage: &mut (impl LazyStorage<'a> + ?Sized), started_with_map_ids: &[BigMapId], finished_with_maps: &mut [&mut BigMap<'a>], + temporary: bool, ) -> Result<(), LazyStorageError> { // Note: this function is similar to `extract_lazy_storage_diff` from the // Tezos protocol implementation. The difference is that we don't have @@ -717,7 +718,6 @@ pub fn dump_big_map_updates<'a>( // * If a contract produces an operation with a big map, we immediately // deduplicate big map ID there too (the Tezos protocol implementation does // not). - // * There is no need to implement temporary lazy storage for now. 
// The `finished_with_maps` vector above is supposed to contain all big maps // remaining on stack at the end of contract execution. After this function @@ -737,6 +737,9 @@ pub fn dump_big_map_updates<'a>( // that in the vast majority of the real-life cases big maps are not // de-facto copied, so the vector will usually stay empty and produce no // allocations. + // + // The temporary boolean means that the result computed by the function should be + // considered as a temporary big_map type NonEmpty = (T, Vec); let mut grouped_maps: BTreeMap> = BTreeMap::new(); for map in finished_with_maps { @@ -756,7 +759,7 @@ pub fn dump_big_map_updates<'a>( BigMapContent::InMemory(ref mut m) => { // The entire big map is still in memory. We have to // create a new map in the storage. - let id = storage.big_map_new(&map.key_type, &map.value_type, false)?; + let id = storage.big_map_new(&map.key_type, &map.value_type, temporary)?; storage.big_map_bulk_update( &id, mem::take(m) @@ -784,7 +787,7 @@ pub fn dump_big_map_updates<'a>( // If there are any big maps with duplicate ID, we first copy them in // the storage. 
for map in other_maps { - let new_id = storage.big_map_copy(&id, false)?; + let new_id = storage.big_map_copy(&id, temporary)?; storage.big_map_bulk_update(&new_id, mem::take(&mut map.overlay))?; map.id = new_id } @@ -823,7 +826,7 @@ mod test_big_map_to_storage_update { key_type: Type::Int, value_type: Type::Int, }; - dump_big_map_updates(storage, &[], &mut [&mut map]).unwrap(); + dump_big_map_updates(storage, &[], &mut [&mut map], false).unwrap(); check_is_dumped_map(map, 0.into()); assert_eq!( @@ -866,7 +869,7 @@ mod test_big_map_to_storage_update { key_type: Type::Int, value_type: Type::Int, }; - dump_big_map_updates(storage, &[], &mut [&mut map]).unwrap(); + dump_big_map_updates(storage, &[], &mut [&mut map], false).unwrap(); check_is_dumped_map(map, 0.into()); assert_eq!( @@ -917,7 +920,13 @@ mod test_big_map_to_storage_update { key_type: Type::Int, value_type: Type::Int, }; - dump_big_map_updates(storage, &[], &mut [&mut map1_1, &mut map1_2, &mut map2]).unwrap(); + dump_big_map_updates( + storage, + &[], + &mut [&mut map1_1, &mut map1_2, &mut map2], + false, + ) + .unwrap(); check_is_dumped_map(map1_1, 0.into()); check_is_dumped_map(map1_2, 2.into()); // newly created map @@ -974,7 +983,7 @@ mod test_big_map_to_storage_update { key_type: Type::Int, value_type: Type::Int, }; - dump_big_map_updates(storage, &[map_id1, map_id2], &mut [&mut map1]).unwrap(); + dump_big_map_updates(storage, &[map_id1, map_id2], &mut [&mut map1], false).unwrap(); assert_eq!( storage.big_maps, diff --git a/contrib/mir/src/interpreter.rs b/contrib/mir/src/interpreter.rs index 082f9cda50ca..3e1026b197af 100644 --- a/contrib/mir/src/interpreter.rs +++ b/contrib/mir/src/interpreter.rs @@ -121,23 +121,36 @@ impl<'a> ContractScript<'a> { // TODO: https://gitlab.com/tezos/tezos/-/issues/8061 // Handle errors instead of panicking. 
- let mut result = stack.pop().expect("empty execution stack"); - let mut finished_with_maps = vec![]; - result.view_big_maps_mut(&mut finished_with_maps); - dump_big_map_updates( - *ctx.lazy_storage(), - &started_with_map_ids, - &mut finished_with_maps, - )?; + let result = stack.pop().expect("empty execution stack"); match result { - V::Pair(p) => match *p { - (V::List(vec), storage) => Ok(( - vec.into_iter() - .map(|x| (*irrefutable_match!(x; V::Operation))), - storage, - )), - (v, _) => panic!("expected `list operation`, got {v:?}"), - }, + V::Pair(p) => { + let (mut operation_list, mut storage) = *p; + // Handle storage big_maps (those big_maps are definitive and will be stored in the durable_storage) + let mut storage_big_maps = vec![]; + storage.view_big_maps_mut(&mut storage_big_maps); + let lazy_storage = *ctx.lazy_storage(); + dump_big_map_updates( + lazy_storage, + &started_with_map_ids, + &mut storage_big_maps, + false, + )?; + // Handle big_maps that appears in the operation list, those big_maps are temporary and it depends to + // the internal operation to determine what to do with it + let mut operations_big_maps = vec![]; + operation_list.view_big_maps_mut(&mut operations_big_maps); + dump_big_map_updates(lazy_storage, &[], &mut operations_big_maps, true)?; + + match operation_list { + V::List(vec) => Ok(( + vec.into_iter() + .map(|x| (*irrefutable_match!(x; V::Operation))), + storage, + )), + v => panic!("expected `list operation`, got {v:?}"), + } + } + v => panic!("expected `pair 'a 'b`, got {v:?}"), } } diff --git a/etherlink/kernel_latest/tezos_execution/src/lib.rs b/etherlink/kernel_latest/tezos_execution/src/lib.rs index 6119645c90ca..925e250b7942 100644 --- a/etherlink/kernel_latest/tezos_execution/src/lib.rs +++ b/etherlink/kernel_latest/tezos_execution/src/lib.rs @@ -531,7 +531,7 @@ fn handle_storage_with_big_maps<'a, Host: Runtime>( storage.view_big_maps_mut(&mut big_maps); // Dump big_map allocation, starting with empty big_maps - 
mir::ast::big_map::dump_big_map_updates(ctx, &[], &mut big_maps) + mir::ast::big_map::dump_big_map_updates(ctx, &[], &mut big_maps, false) .map_err(|err| OriginationError::MirBigMapAllocation(err.to_string()))?; let storage = storage .into_micheline_optimized_legacy(&parser.arena) diff --git a/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs b/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs index 7ccae6c98b3b..34f9c81ada2c 100644 --- a/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs +++ b/etherlink/kernel_latest/tezos_execution/src/mir_ctx.rs @@ -734,7 +734,7 @@ mod tests { key_type: Type::Int, value_type: Type::String, }; - dump_big_map_updates(&mut storage, &[], &mut [&mut map]).unwrap(); + dump_big_map_updates(&mut storage, &[], &mut [&mut map], false).unwrap(); check_is_dumped_map(map, 0.into()); @@ -839,7 +839,7 @@ mod tests { key_type: Type::Int, value_type: Type::String, }; - dump_big_map_updates(&mut storage, &[], &mut [&mut map]).unwrap(); + dump_big_map_updates(&mut storage, &[], &mut [&mut map], false).unwrap(); check_is_dumped_map(map, 0.into()); @@ -918,6 +918,7 @@ mod tests { &mut storage, &[map_id1.clone(), map_id2.clone()], &mut [&mut map1], + false, ) .unwrap(); -- GitLab From a820a9d4835f560e634f6da4a8d583d06d149268 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 20 Nov 2025 16:15:58 +0100 Subject: [PATCH 5/7] MIR/big_map: Handle copy for temporary big_map --- contrib/mir/src/ast/big_map.rs | 8 +++++++- etherlink/kernel_latest/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/contrib/mir/src/ast/big_map.rs b/contrib/mir/src/ast/big_map.rs index 2cb9492bc8b7..2c5928c3d7fd 100644 --- a/contrib/mir/src/ast/big_map.rs +++ b/contrib/mir/src/ast/big_map.rs @@ -791,8 +791,14 @@ pub fn dump_big_map_updates<'a>( storage.big_map_bulk_update(&new_id, mem::take(&mut map.overlay))?; map.id = new_id } + if temporary || id.is_temporary() { + // The only remaining big map should also be copied if the result is 
expected to be temporary + // The big_map should also be copied if it's a temporary one + let new_id = storage.big_map_copy(&id, temporary)?; + main_map.id = new_id; + } // The only remaining big map we update in the lazy storage in-place. - storage.big_map_bulk_update(&id, mem::take(&mut main_map.overlay))? + storage.big_map_bulk_update(&main_map.id, mem::take(&mut main_map.overlay))? } Ok(()) diff --git a/etherlink/kernel_latest/Cargo.toml b/etherlink/kernel_latest/Cargo.toml index 6b2e23ca4f3c..8f192cacde67 100644 --- a/etherlink/kernel_latest/Cargo.toml +++ b/etherlink/kernel_latest/Cargo.toml @@ -113,7 +113,7 @@ alloy-primitives = { version = "1.2.0", default-features = false } alloy-consensus = { version = "1.0.32", default-features = false } # mir without bls nor transfer of big maps -mir = { path = "../../contrib/mir", default-features = false } +mir = { path = "../../contrib/mir", default-features = false, features = ["allow_lazy_storage_transfer"] } typed-arena = "2" # miscs -- GitLab From a261f9012c5fa8a93881280af5f3c43a35a7a419 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Fri, 21 Nov 2025 14:00:43 +0100 Subject: [PATCH 6/7] Tezlink/Kernel: Retrieve the big_map diff before executing the next internal operation This is important to do that before execute_internal_operations as we use std::mem::take to retrieve the big_map_diff from the context. If we don't do that, big_map_diff are going to be packed at the end --- etherlink/kernel_latest/tezos_execution/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/etherlink/kernel_latest/tezos_execution/src/lib.rs b/etherlink/kernel_latest/tezos_execution/src/lib.rs index 925e250b7942..7a5e26757749 100644 --- a/etherlink/kernel_latest/tezos_execution/src/lib.rs +++ b/etherlink/kernel_latest/tezos_execution/src/lib.rs @@ -405,6 +405,8 @@ fn transfer<'a, Host: Runtime>( // consumption, i.e. it does not include that of its internal // operations. 
let consumed_milligas = ctx.tc_ctx.gas.milligas_consumed_by_operation(); + let lazy_storage_diff = + convert_big_map_diff(std::mem::take(ctx.tc_ctx.big_map_diff)); execute_internal_operations( ctx.tc_ctx, ctx.operation_ctx, @@ -417,8 +419,6 @@ fn transfer<'a, Host: Runtime>( TransferError::FailedToExecuteInternalOperation(err.to_string()) })?; log!(ctx.host(), Debug, "Transfer operation succeeded"); - let lazy_storage_diff = - convert_big_map_diff(std::mem::take(ctx.tc_ctx.big_map_diff)); Ok(TransferSuccess { storage: Some(new_storage), lazy_storage_diff, -- GitLab From b79b4c1051a6ed79478542c77cd669f94366e3ca Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 2 Oct 2025 11:20:06 +0200 Subject: [PATCH 7/7] Tezlink/Tezt: Create a function to register regression test for Tezlink big_map transfer --- .../Alpha- Test of the big_map transfers.out | 438 ++++++++++++++++++ etherlink/tezt/tests/tezlink.ml | 111 ++++- 2 files changed, 548 insertions(+), 1 deletion(-) create mode 100644 etherlink/tezt/tests/expected/tezlink.ml/Alpha- Test of the big_map transfers.out diff --git a/etherlink/tezt/tests/expected/tezlink.ml/Alpha- Test of the big_map transfers.out b/etherlink/tezt/tests/expected/tezlink.ml/Alpha- Test of the big_map transfers.out new file mode 100644 index 000000000000..8d7fae0d3318 --- /dev/null +++ b/etherlink/tezt/tests/expected/tezlink.ml/Alpha- Test of the big_map transfers.out @@ -0,0 +1,438 @@ +Test transferring big map from "sender_fresh" to "receiver_drop" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 289.150 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... 
+Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000333 + Expected counter: 3 + Gas limit: 390 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000333 + payload fees(the block proposer) ....... +ꜩ0.000333 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: Unit + Updated big_maps: + New temp(1) of type (big_map string bytes) + Set temp(1)["d"] to 0x + Consumed gas: 185.659 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: Unit + Consumed gas: 103.425 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. +Test transferring big map from "sender_fresh" to "receiver_store" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 289.810 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... +Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000333 + Expected counter: 6 + Gas limit: 390 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000333 + payload fees(the block proposer) ....... 
+ꜩ0.000333 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: Unit + Updated big_maps: + New temp(1) of type (big_map string bytes) + Set temp(1)["d"] to 0x + Consumed gas: 185.659 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: 5 + Updated big_maps: + Copy temp(1) to map(5) + Clear map(4) + Consumed gas: 104.085 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. +Test transferring big map from "sender_fresh" to "receiver_store_updated" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 294.180 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... +Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000334 + Expected counter: 9 + Gas limit: 395 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000334 + payload fees(the block proposer) ....... 
+ꜩ0.000334 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: Unit + Updated big_maps: + New temp(1) of type (big_map string bytes) + Set temp(1)["d"] to 0x + Consumed gas: 185.659 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: 7 + Updated big_maps: + Copy temp(1) to map(7) + Unset map(7)["d"] + Set map(7)["c"] to 0x1124 + Clear map(6) + Consumed gas: 108.455 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. +Test transferring big map from "sender_stored" to "receiver_drop" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 287.213 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... +Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000333 + Expected counter: 12 + Gas limit: 388 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000333 + payload fees(the block proposer) ....... 
+ꜩ0.000333 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: 8 + Updated big_maps: + Copy map(8) to temp(1) + Consumed gas: 183.722 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: Unit + Consumed gas: 103.425 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. +Test transferring big map from "sender_stored" to "receiver_store" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 287.873 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... +Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000333 + Expected counter: 15 + Gas limit: 388 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000333 + payload fees(the block proposer) ....... 
+ꜩ0.000333 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: 10 + Updated big_maps: + Copy map(10) to temp(1) + Consumed gas: 183.722 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: 11 + Updated big_maps: + Copy temp(1) to map(11) + Clear map(9) + Consumed gas: 104.085 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. +Test transferring big map from "sender_stored" to "receiver_store_updated" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 292.243 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... +Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000334 + Expected counter: 18 + Gas limit: 393 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000334 + payload fees(the block proposer) ....... 
+ꜩ0.000334 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: 13 + Updated big_maps: + Copy map(13) to temp(1) + Consumed gas: 183.722 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: 14 + Updated big_maps: + Copy temp(1) to map(14) + Unset map(14)["d"] + Set map(14)["c"] to 0x1124 + Clear map(12) + Consumed gas: 108.455 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. +Test transferring big map from "sender_stored_updated" to "receiver_drop" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 291.583 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... +Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000334 + Expected counter: 21 + Gas limit: 392 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000334 + payload fees(the block proposer) ....... 
+ꜩ0.000334 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: 15 + Updated big_maps: + Copy map(15) to temp(1) + Unset temp(1)["b"] + Set temp(1)["a"] to 0x0010 + Consumed gas: 188.092 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: Unit + Consumed gas: 103.425 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. +Test transferring big map from "sender_stored_updated" to "receiver_store" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 292.243 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... +Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000334 + Expected counter: 24 + Gas limit: 393 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000334 + payload fees(the block proposer) ....... 
+ꜩ0.000334 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: 17 + Updated big_maps: + Copy map(17) to temp(1) + Unset temp(1)["b"] + Set temp(1)["a"] to 0x0010 + Consumed gas: 188.092 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: 18 + Updated big_maps: + Copy temp(1) to map(18) + Clear map(16) + Consumed gas: 104.085 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. +Test transferring big map from "sender_stored_updated" to "receiver_store_updated" + +./octez-client --wait 0 transfer 0 from bootstrap1 to '[CONTRACT_HASH]' --burn-cap 1 --arg '"[CONTRACT_HASH]"' +Node is bootstrapped. +Estimated gas: 296.613 units (will add 100 for safety) +Estimated storage: no bytes added +Operation successfully injected in the node. +Operation hash is '[OPERATION_HASH]' +Waiting for the operation to be included... +Operation found in block: [BLOCK_HASH] (pass: 3, offset: 0) +This sequence of operations was run: + Manager signed operations: + From: [PUBLIC_KEY_HASH] + Fee to the baker: ꜩ0.000334 + Expected counter: 27 + Gas limit: 397 + Storage limit: 0 bytes + Balance updates: + [PUBLIC_KEY_HASH] ... -ꜩ0.000334 + payload fees(the block proposer) ....... 
+ꜩ0.000334 + Transaction: + Amount: ꜩ0 + From: [PUBLIC_KEY_HASH] + To: [CONTRACT_HASH] + Parameter: "[CONTRACT_HASH]" + This transaction was successfully applied + Updated storage: 20 + Updated big_maps: + Copy map(20) to temp(1) + Unset temp(1)["b"] + Set temp(1)["a"] to 0x0010 + Consumed gas: 188.092 + Internal operations: + Internal Transaction: + Amount: ꜩ0 + From: [CONTRACT_HASH] + To: [CONTRACT_HASH] + Parameter: -1 + This transaction was successfully applied + Updated storage: 21 + Updated big_maps: + Copy temp(1) to map(21) + Unset map(21)["d"] + Set map(21)["c"] to 0x1124 + Clear map(19) + Consumed gas: 108.455 + +The operation has only been included 0 blocks ago. +We recommend to wait more. +Use command + octez-client wait for [OPERATION_HASH] to be included --confirmations 1 --branch [BLOCK_HASH] +and/or an external block explorer. diff --git a/etherlink/tezt/tests/tezlink.ml b/etherlink/tezt/tests/tezlink.ml index aacc27b33323..044f69f54b33 100644 --- a/etherlink/tezt/tests/tezlink.ml +++ b/etherlink/tezt/tests/tezlink.ml @@ -2799,6 +2799,114 @@ let test_delayed_deposit_is_included = ~error_msg:"Expected a 1000 tez on bootstrap1" ; unit +let originate_contract ~sequencer ~client ~endpoint ~protocol ~filename ~storage + = + let* _alias, address = + Client.originate_contract_at + ~amount:Tez.zero + ~src:"bootstrap1" + ~init:storage + ~burn_cap:Tez.one + ~force:true + ~endpoint + client + ["big_maps"; filename] + protocol + in + let*@ _ = Rpc.produce_block sequencer in + return address + +let call_contract ~sequencer ~client ~endpoint ~address ~arg = + let transfer () = + Client.transfer + ~burn_cap:Tez.one + ~amount:Tez.zero + (* We need to wait for inclusion so that + octez-client receipt is complete with + lazy_storage_updates related to big_maps *) + ~wait:"0" + ~giver:"bootstrap1" + ~receiver:address + ~arg + ~hooks:Tezos_regression.hooks + ~endpoint + client + in + wait_for_application + ~time_between_blocks:1. 
+ ~produce_block:(fun () -> produce_block sequencer) + transfer + +let test_big_map_transfer = + register_tezlink_regression_test + ~title:"Test of the big_map transfers" + ~tags:["big_map"; "compatibility"; "operations"] + ~bootstrap_accounts:[Constant.bootstrap1] + @@ fun {sequencer; client; _} protocol -> + let*@ _ = produce_block sequencer in + let*@ _ = produce_block sequencer in + let*@ _ = produce_block sequencer in + + let endpoint = + Client.( + Foreign_endpoint + Endpoint. + {(Evm_node.rpc_endpoint_record sequencer) with path = "/tezlink"}) + in + + (* This code comes from tezt/tests/contract_big_map_transfer.ml *) + let* () = + Lwt_list.iter_s + (fun (sender_filename, sender_storage) -> + Lwt_list.iter_s + (fun (receiver_filename, receiver_storage) -> + let () = + Regression.capture + (sf + "Test transferring big map from %S to %S" + sender_filename + receiver_filename) + in + let* receiver = + originate_contract + ~sequencer + ~client + ~endpoint + ~protocol + ~filename:receiver_filename + ~storage:receiver_storage + in + let* sender = + originate_contract + ~sequencer + ~client + ~endpoint + ~protocol + ~filename:sender_filename + ~storage:sender_storage + in + let* () = + call_contract + ~sequencer + ~client + ~endpoint + ~address:sender + ~arg:(sf "%S" receiver) + in + unit) + [ + ("receiver_drop", "Unit"); + ("receiver_store", "{}"); + ("receiver_store_updated", "{}"); + ]) + [ + ("sender_fresh", "Unit"); + ("sender_stored", "{Elt \"d\" 0x; }"); + ("sender_stored_updated", "{Elt \"b\" 0x; Elt \"d\" 0x; }"); + ] + in + unit + let () = test_observer_starts [Alpha] ; test_describe_endpoint [Alpha] ; @@ -2846,4 +2954,5 @@ let () = test_tezlink_forge_operations [Alpha] ; test_tezlink_gas_vs_l1 [Alpha] ; test_node_catchup_on_multichain [Alpha] ; - test_delayed_deposit_is_included [Alpha] + test_delayed_deposit_is_included [Alpha] ; + test_big_map_transfer [Alpha] -- GitLab