From eef491b3b1c903fc4abcfd2ecadf98a536d1854b Mon Sep 17 00:00:00 2001 From: Nathan Holland Date: Thu, 19 Oct 2023 15:23:30 -0600 Subject: [PATCH 001/119] Propagate thread errors from o1trace even if they were raised during first cycle --- src/lib/o1trace/o1trace.ml | 49 +++++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 8 deletions(-) diff --git a/src/lib/o1trace/o1trace.ml b/src/lib/o1trace/o1trace.ml index 79b5ae5ea61a..62514d5db2e0 100644 --- a/src/lib/o1trace/o1trace.ml +++ b/src/lib/o1trace/o1trace.ml @@ -86,9 +86,9 @@ let thread name f = let ctx = with_o1trace ~name ctx in match Scheduler.within_context ctx f with | Error () -> - failwithf - "timing task `%s` failed, exception reported to parent monitor" name - () + (* Scheduler.within_context will send the actual error to the parent monitor asynchronously. + * At this point, the thread has crashed, so we just return a Deferred that will never resolve *) + Deferred.create (Fn.const ()) | Ok x -> x ) @@ -102,11 +102,12 @@ let sync_thread name f = let start_time = Time_ns.now () in let ctx = Scheduler.current_execution_context () in let ctx = with_o1trace ~name ctx in - match Scheduler.within_context ctx f with - | Error () -> - failwithf - "sync timing task `%s` failed, exception reported to parent monitor" - name () + match + Scheduler.Private.with_execution_context (Scheduler.Private.t ()) ctx + ~f:(fun () -> Result.try_with f) + with + | Error exn -> + Exn.reraise exn "exception caught by O1trace.sync_thread" | Ok result -> let elapsed_time = Time_ns.abs_diff (Time_ns.now ()) start_time in on_job_exit' fiber elapsed_time ; @@ -125,6 +126,12 @@ let () = let%test_module "thread tests" = ( module struct + exception Test_exn + + let is_test_exn exn = + (* there isn't a great way to compare the exn to the one that was thrown due to how async mangles the exn, so we do this instead *) + String.is_substring (Exn.to_string exn) ~substring:"(Test_exn)" + let child_of n = match let 
prev_sync_fiber = !current_sync_fiber in @@ -214,5 +221,31 @@ let%test_module "thread tests" = Deferred.unit ) ) ) ; Deferred.unit ) ) ) + let%test_unit "exceptions are handled properly when raised in first cycle \ + of a thread" = + test (fun stop -> + match%map + Monitor.try_with (fun () -> + thread "test" (fun () -> raise Test_exn) ) + with + | Ok _ -> + failwith "expected a failure" + | Error exn -> + assert (is_test_exn exn) ; + stop () ) + + let%test_unit "exceptions are handled properly when raised in first cycle \ + of a sync_thread" = + test (fun stop -> + match%map + Monitor.try_with (fun () -> + sync_thread "test" (fun () -> raise Test_exn) ) + with + | Ok _ -> + failwith "expected a failure" + | Error exn -> + assert (is_test_exn exn) ; + stop () ) + (* TODO: recursion tests *) end ) From 61b8817e5603cf808bfea2ef6505dacec69c5859 Mon Sep 17 00:00:00 2001 From: Nathan Holland Date: Thu, 19 Oct 2023 15:23:51 -0600 Subject: [PATCH 002/119] Reraise mina_net2 errors --- src/lib/gossip_net/libp2p.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/gossip_net/libp2p.ml b/src/lib/gossip_net/libp2p.ml index 2fbc2c2662e1..5962a6dac353 100644 --- a/src/lib/gossip_net/libp2p.ml +++ b/src/lib/gossip_net/libp2p.ml @@ -217,7 +217,7 @@ module Make (Rpc_intf : Network_peer.Rpc_intf.Rpc_interface_intf) : | Mina_net2.Libp2p_helper_died_unexpectedly -> on_unexpected_termination () | _ -> - raise exn + Exn.reraise exn "Mina_net2 raised an exception" in let%bind seeds_from_url = match config.seed_peer_list_url with From 4061884b18137c1182c7fcfa80f52804008a2509 Mon Sep 17 00:00:00 2001 From: dkijania Date: Fri, 20 Oct 2023 16:15:24 +0200 Subject: [PATCH 003/119] Empty commit to signpost Berkeley 2.0.0rampup6 From d4d59e9dbeb7761e32da8b5154c8bdea79c72203 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Mon, 23 Oct 2023 11:52:27 -0700 Subject: [PATCH 004/119] Base58Check format for receipt chain hash in acct precondition --- 
src/app/archive/lib/processor.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app/archive/lib/processor.ml b/src/app/archive/lib/processor.ml index a0322fd7bbb7..58c873c0feea 100644 --- a/src/app/archive/lib/processor.ml +++ b/src/app/archive/lib/processor.ml @@ -913,7 +913,7 @@ module Zkapp_account_precondition = struct in let receipt_chain_hash = Zkapp_basic.Or_ignore.to_option acct.receipt_chain_hash - |> Option.map ~f:Kimchi_backend.Pasta.Basic.Fp.to_string + |> Option.map ~f:Receipt.Chain_hash.to_base58_check in let proved_state = Zkapp_basic.Or_ignore.to_option acct.proved_state in let is_new = Zkapp_basic.Or_ignore.to_option acct.is_new in From 74be2a6b53f57e0d909ace819774633bfc759856 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Mon, 23 Oct 2023 13:53:27 -0700 Subject: [PATCH 005/119] Generate scripts to fix receipt chain hashes --- scripts/migrate-receipt-chain-hashes.sh | 27 +++++++++++++++++++ src/app/receipt_chain_hash_to_b58/dune | 19 +++++++++++++ .../receipt_chain_hash_to_b58.ml | 5 ++++ src/dune-project | 1 + 4 files changed, 52 insertions(+) create mode 100755 scripts/migrate-receipt-chain-hashes.sh create mode 100644 src/app/receipt_chain_hash_to_b58/dune create mode 100644 src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.ml diff --git a/scripts/migrate-receipt-chain-hashes.sh b/scripts/migrate-receipt-chain-hashes.sh new file mode 100755 index 000000000000..63c7d904f3d2 --- /dev/null +++ b/scripts/migrate-receipt-chain-hashes.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +if [ ! 
$# -eq 1 ] ; then + echo "Usage" $0 archive-db + exit 0 +fi + +ARCHIVE_DB=$1 +HASHES_FILE=hashes_file.tmp +UPDATE_SCRIPT=hashes_update.sql + +rm -f $HASHES_FILE +rm -f $UPDATE_SCRIPT + +echo "select id,receipt_chain_hash from zkapp_account_precondition where receipt_chain_hash is not null;" | \ + psql --csv -t -q $ARCHIVE_DB > $HASHES_FILE + +for line in `cat $HASHES_FILE` + do ( + ID=$(echo $line | awk -F , '{print $1}'); + FP=$(echo $line | awk -F , '{print $2}'); + B58=$(echo $FP | _build/default/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe); + echo $ID "'"$B58"'" | awk '{print "UPDATE zkapp_account_precondition SET receipt_chain_hash=" $2 " WHERE id=" $1 ";"}' >> $UPDATE_SCRIPT) +done + +echo "Done!" +echo "Now run:" "psql -d" $ARCHIVE_DB "<" $UPDATE_SCRIPT diff --git a/src/app/receipt_chain_hash_to_b58/dune b/src/app/receipt_chain_hash_to_b58/dune new file mode 100644 index 000000000000..8dcf8756f11e --- /dev/null +++ b/src/app/receipt_chain_hash_to_b58/dune @@ -0,0 +1,19 @@ +(executable + (package receipt_chain_hash_to_b58) + (name receipt_chain_hash_to_b58) + (public_name receipt_chain_hash_to_b58) + (libraries + ;; opam libraries + ;; local libraries + mina_base + kimchi_backend + kimchi_backend.pasta + kimchi_backend.pasta.basic + pickles + pickles.backend + pickles_types + snark_params + ) + (preprocessor_deps ../../config.mlh) + (instrumentation (backend bisect_ppx)) + (preprocess (pps ppx_mina ppx_version))) diff --git a/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.ml b/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.ml new file mode 100644 index 000000000000..a363c6dffff5 --- /dev/null +++ b/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.ml @@ -0,0 +1,5 @@ +let () = + let s = Stdlib.read_line () in + let fp = Kimchi_backend.Pasta.Basic.Fp.of_string s in + let receipt_chain_hash : Mina_base.Receipt.Chain_hash.t = fp in + Format.printf "%s@." 
(Mina_base.Receipt.Chain_hash.to_base58_check receipt_chain_hash) diff --git a/src/dune-project b/src/dune-project index aeaeae71671e..ff60cccc4c47 100644 --- a/src/dune-project +++ b/src/dune-project @@ -163,6 +163,7 @@ (package (name random_oracle_input)) (package (name random_oracle)) (package (name rc_pool)) +(package (name receipt_chain_hash_to_b58)) (package (name replayer)) (package (name rfc3339_time)) (package (name rocksdb)) From af5962b3c120a2d86e6a59ce3559d4e802eb1f07 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Tue, 24 Oct 2023 12:34:40 -0700 Subject: [PATCH 006/119] Allow passing hashes_file and update_script names on command line --- scripts/migrate-receipt-chain-hashes.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/scripts/migrate-receipt-chain-hashes.sh b/scripts/migrate-receipt-chain-hashes.sh index 63c7d904f3d2..13192a3b546f 100755 --- a/scripts/migrate-receipt-chain-hashes.sh +++ b/scripts/migrate-receipt-chain-hashes.sh @@ -1,13 +1,17 @@ #!/bin/bash -if [ ! 
$# -eq 1 ] ; then - echo "Usage" $0 archive-db +if [ $# -lt 1 ] || [ $# -gt 3 ]; then + echo "Usage" $0 archive-db [hashes_file] [update_script] + echo "'hashes_file' and 'update_script' are created when running this script" exit 0 fi ARCHIVE_DB=$1 -HASHES_FILE=hashes_file.tmp -UPDATE_SCRIPT=hashes_update.sql +HASHES_FILE=${2:-hashes_file.tmp} +UPDATE_SCRIPT=${3:-hashes_update.sql} + +echo "Migrating receipt chain hashes in account preconditions in archive db '"$ARCHIVE_DB"'" +echo "Using temporary file '"$HASHES_FILE"' and creating SQL script '"$UPDATE_SCRIPT"'" rm -f $HASHES_FILE rm -f $UPDATE_SCRIPT From 1cc36faf439b1161eae3e41cf5dae58e5b1cfa85 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Tue, 24 Oct 2023 15:59:50 -0700 Subject: [PATCH 007/119] Use generated block fields for subchain queries --- src/app/extract_blocks/sql.ml | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/app/extract_blocks/sql.ml b/src/app/extract_blocks/sql.ml index 4ce9c18a333a..2c4b65997319 100644 --- a/src/app/extract_blocks/sql.ml +++ b/src/app/extract_blocks/sql.ml @@ -2,19 +2,22 @@ module Subchain = struct let make_sql ~join_condition = + let insert_commas s = Core_kernel.String.concat ~sep:"," s in + let fields = insert_commas Archive_lib.Processor.Block.Fields.names in + let b_fields = + insert_commas + @@ Core_kernel.List.map Archive_lib.Processor.Block.Fields.names + ~f:(fun field -> "b." 
^ field) + in Core_kernel.sprintf {sql| WITH RECURSIVE chain AS ( - SELECT id,state_hash,parent_id,parent_hash,creator_id,block_winner_id,snarked_ledger_hash_id, - staking_epoch_data_id,next_epoch_data_id,min_window_density,total_currency,ledger_hash, - height,global_slot_since_hard_fork,global_slot_since_genesis,timestamp,chain_status + SELECT %s FROM blocks b WHERE b.state_hash = $1 UNION ALL - SELECT b.id,b.state_hash,b.parent_id,b.parent_hash,b.creator_id,b.block_winner_id,b.snarked_ledger_hash_id, - b.staking_epoch_data_id,b.next_epoch_data_id,b.min_window_density,b.total_currency,b.ledger_hash, - b.height,b.global_slot_since_hard_fork,b.global_slot_since_genesis,b.timestamp,b.chain_status + SELECT %s FROM blocks b INNER JOIN chain @@ -22,12 +25,10 @@ module Subchain = struct ON %s ) - SELECT state_hash,parent_id,parent_hash,creator_id,block_winner_id,snarked_ledger_hash_id, - staking_epoch_data_id,next_epoch_data_id,min_window_density,total_currency,ledger_hash, - height,global_slot_since_hard_fork,global_slot_since_genesis,timestamp,chain_status + SELECT %s FROM chain |sql} - join_condition + fields b_fields join_condition fields let query_unparented = Caqti_request.collect Caqti_type.string Archive_lib.Processor.Block.typ From a4180b598f47f8731ef87b7f75addff22879bfe0 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Mon, 30 Oct 2023 10:31:18 -0700 Subject: [PATCH 008/119] Function to preserve sign for neg 0 --- src/app/archive/lib/load_data.ml | 3 ++- src/lib/currency/currency.ml | 2 ++ src/lib/currency/intf.ml | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/app/archive/lib/load_data.ml b/src/app/archive/lib/load_data.ml index 0dc3c4bb1bcf..421613f333dd 100644 --- a/src/app/archive/lib/load_data.ml +++ b/src/app/archive/lib/load_data.ml @@ -448,7 +448,8 @@ let get_account_update_body ~pool body_id = | _ -> failwith "Ill-formatted string for balance change" in - Currency.Amount.Signed.create ~magnitude ~sgn + (* amount might be 
negative zero *) + Currency.Amount.Signed.create_preserve_zero_sign ~magnitude ~sgn in let%bind events = load_events pool events_id in let%bind actions = load_events pool actions_id in diff --git a/src/lib/currency/currency.ml b/src/lib/currency/currency.ml index 39fa21fe55b9..44de877cc264 100644 --- a/src/lib/currency/currency.ml +++ b/src/lib/currency/currency.ml @@ -479,6 +479,8 @@ module Make_str (A : Wire_types.Concrete) = struct ; sgn = (if Unsigned.(equal magnitude zero) then Sgn.Pos else sgn) } + let create_preserve_zero_sign ~magnitude ~sgn = { magnitude; sgn } + let sgn { sgn; _ } = sgn let magnitude { magnitude; _ } = magnitude diff --git a/src/lib/currency/intf.ml b/src/lib/currency/intf.ml index 5709223a18b3..197a229b4631 100644 --- a/src/lib/currency/intf.ml +++ b/src/lib/currency/intf.ml @@ -125,6 +125,9 @@ module type Signed_intf = sig val create : magnitude:magnitude -> sgn:Sgn.t -> t + (* allows creation of negative 0 *) + val create_preserve_zero_sign : magnitude:magnitude -> sgn:Sgn.t -> t + val sgn : t -> Sgn.t val magnitude : t -> magnitude From f7c4c3c5fc2d5079987bde98785e8c6dcddb1f64 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Mon, 30 Oct 2023 11:18:50 -0700 Subject: [PATCH 009/119] allow already-migrated hashes --- scripts/migrate-receipt-chain-hashes.sh | 5 ++++- src/app/receipt_chain_hash_to_b58/dune | 1 + .../receipt_chain_hash_to_b58.ml | 16 +++++++++++++--- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/scripts/migrate-receipt-chain-hashes.sh b/scripts/migrate-receipt-chain-hashes.sh index 13192a3b546f..e2d22db41b68 100755 --- a/scripts/migrate-receipt-chain-hashes.sh +++ b/scripts/migrate-receipt-chain-hashes.sh @@ -11,21 +11,24 @@ HASHES_FILE=${2:-hashes_file.tmp} UPDATE_SCRIPT=${3:-hashes_update.sql} echo "Migrating receipt chain hashes in account preconditions in archive db '"$ARCHIVE_DB"'" -echo "Using temporary file '"$HASHES_FILE"' and creating SQL script '"$UPDATE_SCRIPT"'" rm -f $HASHES_FILE rm -f 
$UPDATE_SCRIPT +echo "Creating temporary file" "'"$HASHES_FILE"'" echo "select id,receipt_chain_hash from zkapp_account_precondition where receipt_chain_hash is not null;" | \ psql --csv -t -q $ARCHIVE_DB > $HASHES_FILE +echo "Creating SQL script" "'"$UPDATE_SCRIPT"'" for line in `cat $HASHES_FILE` do ( ID=$(echo $line | awk -F , '{print $1}'); FP=$(echo $line | awk -F , '{print $2}'); B58=$(echo $FP | _build/default/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe); + echo -n . echo $ID "'"$B58"'" | awk '{print "UPDATE zkapp_account_precondition SET receipt_chain_hash=" $2 " WHERE id=" $1 ";"}' >> $UPDATE_SCRIPT) done +echo echo "Done!" echo "Now run:" "psql -d" $ARCHIVE_DB "<" $UPDATE_SCRIPT diff --git a/src/app/receipt_chain_hash_to_b58/dune b/src/app/receipt_chain_hash_to_b58/dune index 8dcf8756f11e..4217bb626ad9 100644 --- a/src/app/receipt_chain_hash_to_b58/dune +++ b/src/app/receipt_chain_hash_to_b58/dune @@ -4,6 +4,7 @@ (public_name receipt_chain_hash_to_b58) (libraries ;; opam libraries + core_kernel ;; local libraries mina_base kimchi_backend diff --git a/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.ml b/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.ml index a363c6dffff5..59a3113cf5bb 100644 --- a/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.ml +++ b/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.ml @@ -1,5 +1,15 @@ +open Core_kernel +open Mina_base + let () = let s = Stdlib.read_line () in - let fp = Kimchi_backend.Pasta.Basic.Fp.of_string s in - let receipt_chain_hash : Mina_base.Receipt.Chain_hash.t = fp in - Format.printf "%s@." 
(Mina_base.Receipt.Chain_hash.to_base58_check receipt_chain_hash) + let b58 = + match Receipt.Chain_hash.of_base58_check s with + | Ok _ -> + s + | Error _ -> + let fp = Kimchi_backend.Pasta.Basic.Fp.of_string s in + let receipt_chain_hash : Mina_base.Receipt.Chain_hash.t = fp in + Receipt.Chain_hash.to_base58_check receipt_chain_hash + in + Format.printf "%s@." b58 From cfeb6aa5040070bfe119b378307fb712da925d20 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Mon, 30 Oct 2023 17:14:58 -0700 Subject: [PATCH 010/119] Base64 encoding of last_vrf_output --- src/app/archive/lib/dune | 5 ++-- src/app/archive/lib/extensional.ml | 2 +- src/app/archive/lib/processor.ml | 34 +++++++++++------------- src/app/extract_blocks/dune | 1 + src/app/extract_blocks/extract_blocks.ml | 3 +-- src/app/replayer/replayer.ml | 2 +- src/lib/consensus/vrf/consensus_vrf.ml | 23 ++++++++++------ src/lib/consensus/vrf/dune | 1 + 8 files changed, 39 insertions(+), 32 deletions(-) diff --git a/src/app/archive/lib/dune b/src/app/archive/lib/dune index 0117fce5bb8b..385408aca97e 100644 --- a/src/app/archive/lib/dune +++ b/src/app/archive/lib/dune @@ -27,8 +27,8 @@ child_processes precomputed_values coda_genesis_ledger + consensus.vrf mina_runtime_config - hex sgn mina_base.util kimchi_backend.pasta @@ -72,9 +72,10 @@ mina_version staged_ledger_diff error_json + ppx_deriving_yojson.runtime ppx_version.runtime ) (inline_tests (flags -verbose -show-counts)) (modes native) (instrumentation (backend bisect_ppx)) - (preprocess (pps ppx_mina ppx_version ppx_jane ppx_custom_printf h_list.ppx))) + (preprocess (pps ppx_mina ppx_version ppx_jane ppx_custom_printf ppx_deriving_yojson h_list.ppx))) diff --git a/src/app/archive/lib/extensional.ml b/src/app/archive/lib/extensional.ml index bd4c962e21af..30d02cce6f4b 100644 --- a/src/app/archive/lib/extensional.ml +++ b/src/app/archive/lib/extensional.ml @@ -111,7 +111,7 @@ module Block = struct ; parent_hash : State_hash.Stable.V1.t ; creator : 
Public_key.Compressed.Stable.V1.t ; block_winner : Public_key.Compressed.Stable.V1.t - ; last_vrf_output : string + ; last_vrf_output : Consensus_vrf.Output.Truncated.Stable.V1.t ; snarked_ledger_hash : Frozen_ledger_hash.Stable.V1.t ; staking_epoch_data : Mina_base.Epoch_data.Value.Stable.V1.t ; next_epoch_data : Mina_base.Epoch_data.Value.Stable.V1.t diff --git a/src/app/archive/lib/processor.ml b/src/app/archive/lib/processor.ml index 58c873c0feea..cbbef5942ae4 100644 --- a/src/app/archive/lib/processor.ml +++ b/src/app/archive/lib/processor.ml @@ -1845,7 +1845,7 @@ module User_command = struct (Caqti_request.find typ Caqti_type.int (Mina_caqti.insert_into_cols ~returning:"id" ~table_name ~tannot:(function - | "typ" -> Some "user_command_type" | _ -> None ) + | "command_type" -> Some "user_command_type" | _ -> None ) ~cols:Fields.names () ) ) { command_type = user_cmd.command_type ; fee_payer_id @@ -1994,7 +1994,8 @@ module Internal_command = struct (Caqti_request.find typ Caqti_type.int (Mina_caqti.insert_into_cols ~returning:"id" ~table_name ~tannot:(function - | "typ" -> Some "internal_command_type" | _ -> None ) + | "command_type" -> Some "internal_command_type" | _ -> None + ) ~cols:Fields.names () ) ) { command_type = internal_cmd.command_type ; receiver_id @@ -2767,9 +2768,9 @@ module Block = struct (Consensus.Data.Consensus_state.block_stake_winner consensus_state) in let last_vrf_output = - (* encode as hex, Postgresql won't accept arbitrary bitstrings *) + (* encode as base64, same as in precomputed blocks JSON *) Consensus.Data.Consensus_state.last_vrf_output consensus_state - |> Hex.Safe.to_hex + |> Base64.encode_exn ~alphabet:Base64.uri_safe_alphabet in let%bind snarked_ledger_hash_id = Snarked_ledger_hash.add_if_doesn't_exist @@ -3173,8 +3174,9 @@ module Block = struct Public_key.add_if_doesn't_exist (module Conn) block.block_winner in let last_vrf_output = - (* already encoded as hex *) + (* encode as base64, same as in precomputed blocks JSON *) 
block.last_vrf_output + |> Base64.encode_exn ~alphabet:Base64.uri_safe_alphabet in let%bind snarked_ledger_hash_id = Snarked_ledger_hash.add_if_doesn't_exist @@ -3214,19 +3216,15 @@ module Block = struct in Conn.find (Caqti_request.find typ Caqti_type.int - {sql| INSERT INTO blocks - (state_hash, parent_id, parent_hash, - creator_id, block_winner_id,last_vrf_output, - snarked_ledger_hash_id, staking_epoch_data_id, - next_epoch_data_id, - min_window_density, sub_window_densities, total_currency, - ledger_hash, height, - global_slot_since_hard_fork, global_slot_since_genesis, - protocol_version, proposed_protocol_version, - timestamp, chain_status) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?::bigint[], ?, ?, ?, ?, ?, ?, ?, ?, ?::chain_status_type) - RETURNING id - |sql} ) + (Mina_caqti.insert_into_cols ~returning:"id" ~table_name + ~tannot:(function + | "sub_window_densities" -> + Some "bigint[]" + | "chain_status" -> + Some "chain_status_type" + | _ -> + None ) + ~cols:Fields.names () ) ) { state_hash = block.state_hash |> State_hash.to_base58_check ; parent_id ; parent_hash = block.parent_hash |> State_hash.to_base58_check diff --git a/src/app/extract_blocks/dune b/src/app/extract_blocks/dune index a0df1831b25d..e3bf8046c9e4 100644 --- a/src/app/extract_blocks/dune +++ b/src/app/extract_blocks/dune @@ -18,6 +18,7 @@ uri async.async_command ;; local libraries + consensus_vrf mina_wire_types mina_base mina_base.import diff --git a/src/app/extract_blocks/extract_blocks.ml b/src/app/extract_blocks/extract_blocks.ml index f4fa0ac2e77c..3605538ae44a 100644 --- a/src/app/extract_blocks/extract_blocks.ml +++ b/src/app/extract_blocks/extract_blocks.ml @@ -49,8 +49,7 @@ let fill_in_block pool (block : Archive_lib.Processor.Block.t) : let%bind creator = pk_of_id block.creator_id in let%bind block_winner = pk_of_id block.block_winner_id in let last_vrf_output = - (* keep hex encoding *) - block.last_vrf_output + Base64.decode_exn ~alphabet:Base64.uri_safe_alphabet 
block.last_vrf_output in let%bind snarked_ledger_hash_str = query_db ~f:(fun db -> diff --git a/src/app/replayer/replayer.ml b/src/app/replayer/replayer.ml index a47e9eaeca42..e35e3622d2f0 100644 --- a/src/app/replayer/replayer.ml +++ b/src/app/replayer/replayer.ml @@ -1574,7 +1574,7 @@ let () = Command.async ~summary:"Replay transactions from Mina archive database" (let%map input_file = Param.flag "--input-file" - ~doc:"file File containing the genesis ledger" + ~doc:"file File containing the starting ledger" Param.(required string) and output_file_opt = Param.flag "--output-file" diff --git a/src/lib/consensus/vrf/consensus_vrf.ml b/src/lib/consensus/vrf/consensus_vrf.ml index 550f59dd770a..60646a4ccc5c 100644 --- a/src/lib/consensus/vrf/consensus_vrf.ml +++ b/src/lib/consensus/vrf/consensus_vrf.ml @@ -165,20 +165,24 @@ module Output = struct [%%versioned module Stable = struct module V1 = struct - type t = string [@@deriving sexp, equal, compare, hash] + type t = string [@@deriving sexp, equal, compare, hash, yojson] let to_yojson t = `String (Base64.encode_exn ~alphabet:Base64.uri_safe_alphabet t) let of_yojson = function | `String s -> - Result.map_error - (Base64.decode ~alphabet:Base64.uri_safe_alphabet s) - ~f:(function `Msg err -> - sprintf - "Error decoding vrf output in \ - Vrf.Output.Truncated.Stable.V1.of_yojson: %s" - err ) + (* missing type equation somewhere, add explicit type *) + ( match Base64.decode ~alphabet:Base64.uri_safe_alphabet s with + | Ok b64 -> + Ppx_deriving_yojson_runtime.Result.Ok b64 + | Error (`Msg err) -> + Error + (sprintf + "Error decoding vrf output in \ + Vrf.Output.Truncated.Stable.V1.of_yojson: %s" + err ) + : (t, string) Ppx_deriving_yojson_runtime.Result.result ) | _ -> Error "Vrf.Output.Truncated.Stable.V1.of_yojson: Expected a string" @@ -195,6 +199,9 @@ module Output = struct let description = "Vrf Truncated Output" end) + (* don't want the yojson functions from Make_base58_check *) + [%%define_locally 
Stable.Latest.(of_yojson, to_yojson)] + open Tick let length_in_bits = Int.min 256 (Field.size_in_bits - 2) diff --git a/src/lib/consensus/vrf/dune b/src/lib/consensus/vrf/dune index d9057e3abc1f..9b76dd6b1a2e 100644 --- a/src/lib/consensus/vrf/dune +++ b/src/lib/consensus/vrf/dune @@ -47,6 +47,7 @@ kimchi_bindings kimchi_types pasta_bindings + ppx_deriving_yojson.runtime ppx_version.runtime ) (inline_tests (flags -verbose -show-counts)) From 142d260ff981898df882eef1612e8c3297c0af55 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Mon, 30 Oct 2023 18:43:09 -0700 Subject: [PATCH 011/119] Migrate last_vrf_output --- scripts/migrate-itn-data.sh | 54 +++++++++++++++++++ scripts/migrate-receipt-chain-hashes.sh | 34 ------------ src/app/last_vrf_output_to_b64/dune | 14 +++++ .../last_vrf_output_to_b64.ml | 23 ++++++++ src/dune-project | 1 + 5 files changed, 92 insertions(+), 34 deletions(-) create mode 100755 scripts/migrate-itn-data.sh delete mode 100755 scripts/migrate-receipt-chain-hashes.sh create mode 100644 src/app/last_vrf_output_to_b64/dune create mode 100644 src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.ml diff --git a/scripts/migrate-itn-data.sh b/scripts/migrate-itn-data.sh new file mode 100755 index 000000000000..41f1e4b0a598 --- /dev/null +++ b/scripts/migrate-itn-data.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +if [ $# -lt 1 ] || [ $# -gt 3 ]; then + echo "Usage" $0 archive-db [data_file] [update_script] + echo "'data_file' and 'update_script' are created when running this script" + exit 0 +fi + +ARCHIVE_DB=$1 +DATA_FILE=${2:-data_file.tmp} +UPDATE_SCRIPT=${3:-data_update.sql} + +echo "Migrating receipt chain hashes in account preconditions in archive db '"$ARCHIVE_DB"'" + +rm -f $DATA_FILE +rm -f $UPDATE_SCRIPT + +echo "Creating temporary file with receipt chain hashes" "'"$DATA_FILE"'" +echo "select id,receipt_chain_hash from zkapp_account_precondition where receipt_chain_hash is not null;" | \ + psql --csv -t -q $ARCHIVE_DB > $DATA_FILE + +echo 
"Creating SQL script" "'"$UPDATE_SCRIPT"'" +for line in `cat $DATA_FILE` + do ( + ID=$(echo $line | awk -F , '{print $1}'); + FP=$(echo $line | awk -F , '{print $2}'); + B58=$(echo $FP | _build/default/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe); + echo -n . + echo $ID "'"$B58"'" | awk '{print "UPDATE zkapp_account_precondition SET receipt_chain_hash=" $2 " WHERE id=" $1 ";"}' >> $UPDATE_SCRIPT) +done + +echo +echo "Receipt chain hash pass done!" + +rm -f $DATA_FILE + +echo "Creating temporary file with last_vrf_ouput" "'"$DATA_FILE"'" +echo "select id,last_vrf_output from blocks;" | \ + psql --csv -t -q $ARCHIVE_DB > $DATA_FILE + +echo "Adding to SQL script" "'"$UPDATE_SCRIPT"'" +for line in `cat $DATA_FILE` + do ( + ID=$(echo $line | awk -F , '{print $1}'); + FP=$(echo $line | awk -F , '{print $2}'); + B64=$(echo $FP | _build/default/src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.exe); + echo -n . + echo $ID "'"$B64"'" | awk '{print "UPDATE blocks SET last_vrf_output=" $2 " WHERE id=" $1 ";"}' >> $UPDATE_SCRIPT) +done + +echo +echo "Last VRF output pass done!" 
+ +echo "Now run:" "psql -d" $ARCHIVE_DB "<" $UPDATE_SCRIPT diff --git a/scripts/migrate-receipt-chain-hashes.sh b/scripts/migrate-receipt-chain-hashes.sh deleted file mode 100755 index e2d22db41b68..000000000000 --- a/scripts/migrate-receipt-chain-hashes.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -if [ $# -lt 1 ] || [ $# -gt 3 ]; then - echo "Usage" $0 archive-db [hashes_file] [update_script] - echo "'hashes_file' and 'update_script' are created when running this script" - exit 0 -fi - -ARCHIVE_DB=$1 -HASHES_FILE=${2:-hashes_file.tmp} -UPDATE_SCRIPT=${3:-hashes_update.sql} - -echo "Migrating receipt chain hashes in account preconditions in archive db '"$ARCHIVE_DB"'" - -rm -f $HASHES_FILE -rm -f $UPDATE_SCRIPT - -echo "Creating temporary file" "'"$HASHES_FILE"'" -echo "select id,receipt_chain_hash from zkapp_account_precondition where receipt_chain_hash is not null;" | \ - psql --csv -t -q $ARCHIVE_DB > $HASHES_FILE - -echo "Creating SQL script" "'"$UPDATE_SCRIPT"'" -for line in `cat $HASHES_FILE` - do ( - ID=$(echo $line | awk -F , '{print $1}'); - FP=$(echo $line | awk -F , '{print $2}'); - B58=$(echo $FP | _build/default/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe); - echo -n . - echo $ID "'"$B58"'" | awk '{print "UPDATE zkapp_account_precondition SET receipt_chain_hash=" $2 " WHERE id=" $1 ";"}' >> $UPDATE_SCRIPT) -done - -echo -echo "Done!" 
-echo "Now run:" "psql -d" $ARCHIVE_DB "<" $UPDATE_SCRIPT diff --git a/src/app/last_vrf_output_to_b64/dune b/src/app/last_vrf_output_to_b64/dune new file mode 100644 index 000000000000..54c9f34dbc58 --- /dev/null +++ b/src/app/last_vrf_output_to_b64/dune @@ -0,0 +1,14 @@ +(executable + (package last_vrf_output_to_b64) + (name last_vrf_output_to_b64) + (public_name last_vrf_output_to_b64) + (libraries + ;; opam libraries + base64 + core_kernel + hex + ;; local libraries + ) + (preprocessor_deps ../../config.mlh) + (instrumentation (backend bisect_ppx)) + (preprocess (pps ppx_mina ppx_version))) diff --git a/src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.ml b/src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.ml new file mode 100644 index 000000000000..4cf7c7074ec5 --- /dev/null +++ b/src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.ml @@ -0,0 +1,23 @@ +open Core_kernel + +let () = + let s = Stdlib.read_line () in + let b64_check () = + match Base64.decode ~alphabet:Base64.uri_safe_alphabet s with + | Ok _ -> + (* already base64 *) + s + | Error _ -> + failwith "Bad Base64 encoding" + in + let b64 = + (* try unhexing first, because hex chars are also base64 chars *) + try + match Hex.Safe.of_hex s with + | Some unhexed -> + Base64.encode_exn ~alphabet:Base64.uri_safe_alphabet unhexed + | None -> + b64_check () + with _ -> b64_check () + in + Format.printf "%s@." 
b64 diff --git a/src/dune-project b/src/dune-project index ff60cccc4c47..7a68f9cdfa8f 100644 --- a/src/dune-project +++ b/src/dune-project @@ -79,6 +79,7 @@ (package (name kimchi_backend)) (package (name kimchi_bindings)) (package (name kimchi_types)) +(package (name last_vrf_output_to_b64)) (package (name ledger_catchup)) (package (name ledger_proof)) (package (name libp2p_ipc)) From 11a2998a3791c1a6db6538c5ce4ea0246dda4dd8 Mon Sep 17 00:00:00 2001 From: "It's me, CI" Date: Tue, 31 Oct 2023 14:02:09 -0700 Subject: [PATCH 012/119] rm unused Balances module --- src/app/replayer/sql.ml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/app/replayer/sql.ml b/src/app/replayer/sql.ml index 711c4bd447f7..40caaf5af13c 100644 --- a/src/app/replayer/sql.ml +++ b/src/app/replayer/sql.ml @@ -464,16 +464,3 @@ module Parent_block = struct epoch_ledgers_state_hash = Conn.find query_parent_state_hash epoch_ledgers_state_hash end - -module Balances = struct - let query_insert_nonce = - Caqti_request.exec - Caqti_type.(tup2 int int64) - {sql| UPDATE balances - SET nonce = $2 - WHERE id = $1 - |sql} - - let insert_nonce (module Conn : Caqti_async.CONNECTION) ~id ~nonce = - Conn.exec query_insert_nonce (id, nonce) -end From a6af91ac6d1c2eead28b5034b3aa8acc9480512f Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Thu, 2 Nov 2023 16:41:00 -0300 Subject: [PATCH 013/119] More memory-efficient scan state and staged ledger hashing This version avoids the construction of intermediary strings through concatenation and instead feeds every chunk directly to the hashing context to considerably reduce the amount of allocations performed during hashing. 
--- src/lib/parallel_scan/parallel_scan.ml | 41 +++++++++++-------- .../transaction_snark_scan_state.ml | 19 ++++----- 2 files changed, 33 insertions(+), 27 deletions(-) diff --git a/src/lib/parallel_scan/parallel_scan.ml b/src/lib/parallel_scan/parallel_scan.ml index 08fedf99646a..6b983842ed3b 100644 --- a/src/lib/parallel_scan/parallel_scan.ml +++ b/src/lib/parallel_scan/parallel_scan.ml @@ -941,39 +941,46 @@ module State = struct match job with Job.Merge a -> f_merge a | Base d -> f_base d ) in Mina_stdlib.Nonempty_list.iter trees ~f:(fun tree -> - let w_to_string { Weight.base = b; merge = m } = - Int.to_string b ^ Int.to_string m + let add_weight_to_hash { Weight.base = b; merge = m } = + add_string @@ Int.to_string b ; + add_string @@ Int.to_string m + in + let add_weight_pair_to_hash (w1, w2) = + add_weight_to_hash w1 ; add_weight_to_hash w2 in - let w_to_string' (w1, w2) = w_to_string w1 ^ w_to_string w2 in let f_merge = function | w, Merge.Job.Empty -> - add_string (w_to_string' w ^ "Empty") + add_weight_pair_to_hash w ; add_string "Empty" | w, Merge.Job.Full { left; right; status; seq_no } -> - add_string - ( w_to_string' w ^ "Full" ^ Int.to_string seq_no - ^ Job_status.to_string status ) ; + add_weight_pair_to_hash w ; + add_string "Full" ; + add_string @@ Int.to_string seq_no ; + add_string @@ Job_status.to_string status ; add_string (f_merge left) ; add_string (f_merge right) | w, Merge.Job.Part j -> - add_string (w_to_string' w ^ "Part") ; + add_weight_pair_to_hash w ; + add_string "Part" ; add_string (f_merge j) in let f_base = function | w, Base.Job.Empty -> - add_string (w_to_string w ^ "Empty") + add_weight_to_hash w ; add_string "Empty" | w, Base.Job.Full { job; status; seq_no } -> - add_string - ( w_to_string w ^ "Full" ^ Int.to_string seq_no - ^ Job_status.to_string status ) ; + add_weight_to_hash w ; + add_string "Full" ; + add_string @@ Int.to_string seq_no ; + add_string @@ Job_status.to_string status ; add_string (f_base job) in tree_hash 
tree f_merge f_base ) in - let acc_string = - Option.value_map acc ~default:"None" ~f:(fun (a, d_lst) -> - f_merge a ^ List.fold ~init:"" d_lst ~f:(fun acc d -> acc ^ f_base d) ) - in - add_string acc_string ; + ( match acc with + | Some (a, d_lst) -> + add_string (f_merge a) ; + List.iter d_lst ~f:(fun d -> add_string (f_base d)) + | None -> + add_string "None" ) ; add_string (Int.to_string curr_job_seq_no) ; add_string (Int.to_string max_base_jobs) ; add_string (Int.to_string delay) ; diff --git a/src/lib/transaction_snark_scan_state/transaction_snark_scan_state.ml b/src/lib/transaction_snark_scan_state/transaction_snark_scan_state.ml index 3b82dbeaf991..7a1c5e9f3e25 100644 --- a/src/lib/transaction_snark_scan_state/transaction_snark_scan_state.ml +++ b/src/lib/transaction_snark_scan_state/transaction_snark_scan_state.ml @@ -181,20 +181,19 @@ module Stable = struct t.previous_incomplete_zkapp_updates in let incomplete_updates = - List.fold ~init:"" previous_incomplete_zkapp_updates ~f:(fun acc t -> - acc - ^ Binable.to_string (module Transaction_with_witness.Stable.V2) t ) - |> Digestif.SHA256.digest_string + List.fold ~init:(Digestif.SHA256.init ()) + previous_incomplete_zkapp_updates ~f:(fun h t -> + Digestif.SHA256.feed_string h + @@ Binable.to_string (module Transaction_with_witness.Stable.V2) t ) + |> Digestif.SHA256.get in let continue_in_next_tree = Digestif.SHA256.digest_string (Bool.to_string continue_in_next_tree) in - Staged_ledger_hash.Aux_hash.of_sha256 - Digestif.SHA256.( - digest_string - ( to_raw_string state_hash - ^ to_raw_string incomplete_updates - ^ to_raw_string continue_in_next_tree )) + [ state_hash; incomplete_updates; continue_in_next_tree ] + |> List.fold ~init:(Digestif.SHA256.init ()) ~f:(fun h t -> + Digestif.SHA256.feed_string h (Digestif.SHA256.to_raw_string t) ) + |> Digestif.SHA256.get |> Staged_ledger_hash.Aux_hash.of_sha256 end end] From e4e40a35a607ac74cc7f352e50c4f0da8add7542 Mon Sep 17 00:00:00 2001 From: Nathan Holland 
Date: Tue, 31 Oct 2023 13:18:35 -0600 Subject: [PATCH 014/119] Fix go read/write races --- src/app/libp2p_helper/src/bitswap_storage.go | 2 +- src/app/libp2p_helper/src/codanet.go | 100 ++++++++++++------ src/app/libp2p_helper/src/codanet_test.go | 4 +- .../libp2p_helper/src/libp2p_helper/app.go | 2 +- .../src/libp2p_helper/bitswap.go | 2 +- .../src/libp2p_helper/config_msg.go | 4 +- .../libp2p_helper/src/libp2p_helper/data.go | 2 +- .../src/libp2p_helper/main_test.go | 4 + .../src/libp2p_helper/multinode_test.go | 4 +- .../src/libp2p_helper/peer_msg.go | 2 +- .../src/libp2p_helper/pubsub_msg.go | 2 +- .../src/libp2p_helper/pubsub_msg_test.go | 5 +- .../src/libp2p_helper/util_test.go | 2 +- 13 files changed, 84 insertions(+), 51 deletions(-) diff --git a/src/app/libp2p_helper/src/bitswap_storage.go b/src/app/libp2p_helper/src/bitswap_storage.go index ff7e089eef22..5a54e36eb2fa 100644 --- a/src/app/libp2p_helper/src/bitswap_storage.go +++ b/src/app/libp2p_helper/src/bitswap_storage.go @@ -4,9 +4,9 @@ import ( "context" "fmt" + "github.com/ipfs/boxo/blockstore" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/ipfs/boxo/blockstore" "github.com/ledgerwatch/lmdb-go/lmdb" "github.com/multiformats/go-multihash" lmdbbs "github.com/o1-labs/go-bs-lmdb" diff --git a/src/app/libp2p_helper/src/codanet.go b/src/app/libp2p_helper/src/codanet.go index 431eb76c3291..b54ed689beed 100644 --- a/src/app/libp2p_helper/src/codanet.go +++ b/src/app/libp2p_helper/src/codanet.go @@ -10,12 +10,17 @@ import ( "sync" "time" - "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap" bitnet "github.com/ipfs/boxo/bitswap/network" dsb "github.com/ipfs/go-ds-badger" logging "github.com/ipfs/go-log/v2" p2p "github.com/libp2p/go-libp2p" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/dual" + pubsub "github.com/libp2p/go-libp2p-pubsub" + record "github.com/libp2p/go-libp2p-record" + p2pconfig "github.com/libp2p/go-libp2p/config" 
"github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/control" "github.com/libp2p/go-libp2p/core/crypto" @@ -25,14 +30,9 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" - dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p-kad-dht/dual" - "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" - pubsub "github.com/libp2p/go-libp2p-pubsub" - record "github.com/libp2p/go-libp2p-record" - p2pconfig "github.com/libp2p/go-libp2p/config" mdns "github.com/libp2p/go-libp2p/p2p/discovery/mdns" discovery "github.com/libp2p/go-libp2p/p2p/discovery/routing" + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" libp2pyamux "github.com/libp2p/go-libp2p/p2p/muxer/yamux" p2pconnmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr" "github.com/libp2p/go-libp2p/p2p/transport/tcp" @@ -89,9 +89,11 @@ func isPrivateAddr(addr ma.Multiaddr) bool { } type CodaConnectionManager struct { - p2pManager *p2pconnmgr.BasicConnMgr - OnConnect func(network.Network, network.Conn) - OnDisconnect func(network.Network, network.Conn) + p2pManager *p2pconnmgr.BasicConnMgr + onConnectMutex sync.Mutex + onConnect func(network.Network, network.Conn) + onDisconnectMutex sync.Mutex + onDisconnect func(network.Network, network.Conn) // protectedMirror is a map of protected peer ids/tags, mirroring the structure in // BasicConnMgr which is not accessible from CodaConnectionManager protectedMirror map[peer.ID]map[string]interface{} @@ -99,19 +101,23 @@ type CodaConnectionManager struct { } func (cm *CodaConnectionManager) AddOnConnectHandler(f func(network.Network, network.Conn)) { - prevOnConnect := cm.OnConnect - cm.OnConnect = func(net network.Network, c network.Conn) { + cm.onConnectMutex.Lock() + prevOnConnect := cm.onConnect + cm.onConnect = func(net network.Network, c network.Conn) { prevOnConnect(net, c) f(net, c) } + cm.onConnectMutex.Unlock() } func (cm 
*CodaConnectionManager) AddOnDisconnectHandler(f func(network.Network, network.Conn)) { - prevOnDisconnect := cm.OnDisconnect - cm.OnDisconnect = func(net network.Network, c network.Conn) { + cm.onDisconnectMutex.Lock() + prevOnDisconnect := cm.onDisconnect + cm.onDisconnect = func(net network.Network, c network.Conn) { prevOnDisconnect(net, c) f(net, c) } + cm.onDisconnectMutex.Unlock() } func newCodaConnectionManager(minConnections, maxConnections int, grace time.Duration) (*CodaConnectionManager, error) { @@ -122,8 +128,8 @@ func newCodaConnectionManager(minConnections, maxConnections int, grace time.Dur } return &CodaConnectionManager{ p2pManager: connmgr, - OnConnect: noop, - OnDisconnect: noop, + onConnect: noop, + onDisconnect: noop, protectedMirror: make(map[peer.ID]map[string]interface{}), }, nil } @@ -193,12 +199,16 @@ func (cm *CodaConnectionManager) ListenClose(net network.Network, addr ma.Multia } func (cm *CodaConnectionManager) Connected(net network.Network, c network.Conn) { logger.Debugf("%s connected to %s", c.LocalPeer(), c.RemotePeer()) - cm.OnConnect(net, c) + cm.onConnectMutex.Lock() + cm.onConnect(net, c) + cm.onConnectMutex.Unlock() cm.p2pManager.Notifee().Connected(net, c) } func (cm *CodaConnectionManager) Disconnected(net network.Network, c network.Conn) { - cm.OnDisconnect(net, c) + cm.onDisconnectMutex.Lock() + cm.onDisconnect(net, c) + cm.onDisconnectMutex.Unlock() cm.p2pManager.Notifee().Disconnected(net, c) } @@ -274,7 +284,9 @@ func (ms *MessageStats) GetStats() *safeStats { } func (h *Helper) ResetGatingConfigTrustedAddrFilters() { - h.gatingState.TrustedAddrFilters = ma.NewFilters() + h.gatingState.trustedAddrFiltersMutex.Lock() + h.gatingState.trustedAddrFilters = ma.NewFilters() + h.gatingState.trustedAddrFiltersMutex.Unlock() } // this type implements the ConnectionGating interface @@ -283,10 +295,14 @@ func (h *Helper) ResetGatingConfigTrustedAddrFilters() { type CodaGatingState struct { logger logging.EventLogger 
KnownPrivateAddrFilters *ma.Filters - BannedAddrFilters *ma.Filters - TrustedAddrFilters *ma.Filters - BannedPeers map[peer.ID]struct{} - TrustedPeers map[peer.ID]struct{} + bannedAddrFiltersMutex sync.Mutex + bannedAddrFilters *ma.Filters + trustedAddrFiltersMutex sync.Mutex + trustedAddrFilters *ma.Filters + bannedPeersMutex sync.Mutex + bannedPeers map[peer.ID]struct{} + trustedPeersMutex sync.Mutex + trustedPeers map[peer.ID]struct{} } type CodaGatingConfig struct { @@ -322,11 +338,11 @@ func NewCodaGatingState(config *CodaGatingConfig, knownPrivateAddrFilters *ma.Fi return &CodaGatingState{ logger: logger, - BannedAddrFilters: bannedAddrFilters, - TrustedAddrFilters: trustedAddrFilters, + bannedAddrFilters: bannedAddrFilters, + trustedAddrFilters: trustedAddrFilters, KnownPrivateAddrFilters: knownPrivateAddrFilters, - BannedPeers: bannedPeers, - TrustedPeers: trustedPeers, + bannedPeers: bannedPeers, + trustedPeers: trustedPeers, } } @@ -335,10 +351,10 @@ func (h *Helper) GatingState() *CodaGatingState { } func (h *Helper) SetGatingState(gs *CodaGatingConfig) { - h.gatingState.TrustedPeers = gs.TrustedPeers - h.gatingState.BannedPeers = gs.BannedPeers - h.gatingState.TrustedAddrFilters = gs.TrustedAddrFilters - h.gatingState.BannedAddrFilters = gs.BannedAddrFilters + h.gatingState.trustedPeers = gs.TrustedPeers + h.gatingState.bannedPeers = gs.BannedPeers + h.gatingState.trustedAddrFilters = gs.TrustedAddrFilters + h.gatingState.bannedAddrFilters = gs.BannedAddrFilters for _, c := range h.Host.Network().Conns() { pid := c.RemotePeer() maddr := c.RemoteMultiaddr() @@ -352,6 +368,12 @@ func (h *Helper) SetGatingState(gs *CodaGatingConfig) { } } +func (gs *CodaGatingState) TrustPeer(p peer.ID) { + gs.trustedPeersMutex.Lock() + gs.trustedPeers[p] = struct{}{} + gs.trustedPeersMutex.Unlock() +} + func (gs *CodaGatingState) MarkPrivateAddrAsKnown(addr ma.Multiaddr) { if isPrivateAddr(addr) && gs.KnownPrivateAddrFilters.AddrBlocked(addr) { gs.logger.Infof("marking 
private addr %v as known", addr) @@ -397,7 +419,9 @@ func (c connectionAllowance) isDeny() bool { } func (gs *CodaGatingState) checkPeerTrusted(p peer.ID) connectionAllowance { - _, isTrusted := gs.TrustedPeers[p] + gs.trustedPeersMutex.Lock() + _, isTrusted := gs.trustedPeers[p] + gs.trustedPeersMutex.Unlock() if isTrusted { return Accept } @@ -405,7 +429,9 @@ func (gs *CodaGatingState) checkPeerTrusted(p peer.ID) connectionAllowance { } func (gs *CodaGatingState) checkPeerBanned(p peer.ID) connectionAllowance { - _, isBanned := gs.BannedPeers[p] + gs.bannedPeersMutex.Lock() + _, isBanned := gs.bannedPeers[p] + gs.bannedPeersMutex.Unlock() if isBanned { return DenyBannedPeer } @@ -440,14 +466,18 @@ func (gs *CodaGatingState) checkAllowedPeer(p peer.ID) connectionAllowance { } func (gs *CodaGatingState) checkAddrTrusted(addr ma.Multiaddr) connectionAllowance { - if !gs.TrustedAddrFilters.AddrBlocked(addr) { + gs.trustedAddrFiltersMutex.Lock() + defer gs.trustedAddrFiltersMutex.Unlock() + if !gs.trustedAddrFilters.AddrBlocked(addr) { return Accept } return Undecided } func (gs *CodaGatingState) checkAddrBanned(addr ma.Multiaddr) connectionAllowance { - if gs.BannedAddrFilters.AddrBlocked(addr) { + gs.bannedAddrFiltersMutex.Lock() + defer gs.bannedAddrFiltersMutex.Unlock() + if gs.bannedAddrFilters.AddrBlocked(addr) { return DenyBannedAddress } return Undecided diff --git a/src/app/libp2p_helper/src/codanet_test.go b/src/app/libp2p_helper/src/codanet_test.go index 572818dfcd8b..a1404af6e112 100644 --- a/src/app/libp2p_helper/src/codanet_test.go +++ b/src/app/libp2p_helper/src/codanet_test.go @@ -37,7 +37,9 @@ func TestTrustedPrivateConnectionGating(t *testing.T) { allowed := gs.InterceptAddrDial(testInfo.ID, testMa) require.False(t, allowed) - gs.TrustedPeers[testInfo.ID] = struct{}{} + gs.trustedPeersMutex.Lock() + gs.trustedPeers[testInfo.ID] = struct{}{} + gs.trustedPeersMutex.Unlock() allowed = gs.InterceptAddrDial(testInfo.ID, testMa) require.True(t, allowed) } 
diff --git a/src/app/libp2p_helper/src/libp2p_helper/app.go b/src/app/libp2p_helper/src/libp2p_helper/app.go index ad70a586435d..309fb6a22460 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/app.go +++ b/src/app/libp2p_helper/src/libp2p_helper/app.go @@ -15,9 +15,9 @@ import ( capnp "capnproto.org/go/capnp/v3" "github.com/go-errors/errors" + pubsub "github.com/libp2p/go-libp2p-pubsub" net "github.com/libp2p/go-libp2p/core/network" peer "github.com/libp2p/go-libp2p/core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" mdns "github.com/libp2p/go-libp2p/p2p/discovery/mdns" "github.com/multiformats/go-multiaddr" "github.com/prometheus/client_golang/prometheus" diff --git a/src/app/libp2p_helper/src/libp2p_helper/bitswap.go b/src/app/libp2p_helper/src/libp2p_helper/bitswap.go index 69b478f82034..f12063ed48b5 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/bitswap.go +++ b/src/app/libp2p_helper/src/libp2p_helper/bitswap.go @@ -8,7 +8,7 @@ import ( "time" "capnproto.org/go/capnp/v3" - "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" exchange "github.com/ipfs/go-ipfs-exchange-interface" diff --git a/src/app/libp2p_helper/src/libp2p_helper/config_msg.go b/src/app/libp2p_helper/src/libp2p_helper/config_msg.go index 4f455ea213c7..ef9e215a5c3d 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/config_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/config_msg.go @@ -13,12 +13,12 @@ import ( capnp "capnproto.org/go/capnp/v3" "github.com/go-errors/errors" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pb "github.com/libp2p/go-libp2p-pubsub/pb" crypto "github.com/libp2p/go-libp2p/core/crypto" net "github.com/libp2p/go-libp2p/core/network" peer "github.com/libp2p/go-libp2p/core/peer" peerstore "github.com/libp2p/go-libp2p/core/peerstore" - pubsub "github.com/libp2p/go-libp2p-pubsub" - pb "github.com/libp2p/go-libp2p-pubsub/pb" discovery 
"github.com/libp2p/go-libp2p/p2p/discovery/routing" "github.com/multiformats/go-multiaddr" "golang.org/x/crypto/blake2b" diff --git a/src/app/libp2p_helper/src/libp2p_helper/data.go b/src/app/libp2p_helper/src/libp2p_helper/data.go index 7641a0790c0c..e07f903ef02c 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/data.go +++ b/src/app/libp2p_helper/src/libp2p_helper/data.go @@ -15,9 +15,9 @@ import ( "codanet" capnp "capnproto.org/go/capnp/v3" + pubsub "github.com/libp2p/go-libp2p-pubsub" net "github.com/libp2p/go-libp2p/core/network" peer "github.com/libp2p/go-libp2p/core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" ) type app struct { diff --git a/src/app/libp2p_helper/src/libp2p_helper/main_test.go b/src/app/libp2p_helper/src/libp2p_helper/main_test.go index 171365a9a3ef..e2ca0b67a332 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/main_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/main_test.go @@ -8,6 +8,7 @@ import ( "io/ioutil" "os" "strings" + "sync" "testing" "time" @@ -263,9 +264,12 @@ func TestLibp2pMetrics(t *testing.T) { require.NoError(t, err) var streamIdx uint64 = 0 + var streamMutex sync.Mutex handler := func(stream net.Stream) { + streamMutex.Lock() handleStreamReads(appB, stream, streamIdx) streamIdx++ + streamMutex.Unlock() } appB.P2p.Host.SetStreamHandler(testProtocol, handler) diff --git a/src/app/libp2p_helper/src/libp2p_helper/multinode_test.go b/src/app/libp2p_helper/src/libp2p_helper/multinode_test.go index 62bd54584cfd..5b2a66de368f 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/multinode_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/multinode_test.go @@ -15,10 +15,10 @@ import ( capnp "capnproto.org/go/capnp/v3" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/peer" kb "github.com/libp2p/go-libp2p-kbucket" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" 
"github.com/stretchr/testify/require" ) diff --git a/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go b/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go index 0b37f4010b93..a5a88fe9edd4 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go @@ -41,7 +41,7 @@ func (m AddPeerReq) handle(app *app, seqno uint64) *capnp.Message { } app.AddedPeers = append(app.AddedPeers, *info) - app.P2p.GatingState().TrustedPeers[info.ID] = struct{}{} + app.P2p.GatingState().TrustPeer(info.ID) if app.Bootstrapper != nil { app.Bootstrapper.Close() diff --git a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go index 11e23382468c..0008a8d483aa 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go @@ -8,8 +8,8 @@ import ( capnp "capnproto.org/go/capnp/v3" "github.com/go-errors/errors" - peer "github.com/libp2p/go-libp2p/core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" + peer "github.com/libp2p/go-libp2p/core/peer" ) type ValidationPushT = ipc.Libp2pHelperInterface_Validation diff --git a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go index 7ae62a55f510..1dd1d4dbc99a 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go @@ -117,15 +117,11 @@ func TestValidationPush(t *testing.T) { } for i := 0; i < len(ipcValResults); i++ { - result := ValidationUnknown seqno := uint64(i) status := &validationStatus{ Completion: make(chan pubsub.ValidationResult), } testApp.Validators[seqno] = status - go func() { - result = <-status.Completion - }() _, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) require.NoError(t, err) m, err := ipc.NewRootLibp2pHelperInterface_Validation(seg) @@ -135,6 +131,7 @@ func TestValidationPush(t *testing.T) 
{ m.SetResult(ipcValResults[i]) ValidationPush(m).handle(testApp) require.NoError(t, err) + result := <-status.Completion require.Equal(t, pubsubValResults[i], result) _, has := testApp.Validators[seqno] require.False(t, has) diff --git a/src/app/libp2p_helper/src/libp2p_helper/util_test.go b/src/app/libp2p_helper/src/libp2p_helper/util_test.go index bb18e8c9468b..b3e9530259de 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/util_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/util_test.go @@ -12,12 +12,12 @@ import ( "codanet" + pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/host" net "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - pubsub "github.com/libp2p/go-libp2p-pubsub" ma "github.com/multiformats/go-multiaddr" From c4acd2e5617f32c0381de4f364439bda222329bd Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Mon, 13 Nov 2023 21:11:35 -0300 Subject: [PATCH 015/119] Protect shared data accesses --- src/app/libp2p_helper/src/codanet.go | 12 +- .../libp2p_helper/src/libp2p_helper/app.go | 158 +++++++++++++++++- .../src/libp2p_helper/config_msg.go | 12 +- .../src/libp2p_helper/config_msg_test.go | 2 +- .../libp2p_helper/src/libp2p_helper/data.go | 19 ++- .../src/libp2p_helper/incoming_msg.go | 1 + .../src/libp2p_helper/peer_msg.go | 4 +- .../src/libp2p_helper/pubsub_msg.go | 51 ++---- .../src/libp2p_helper/pubsub_msg_test.go | 12 +- .../src/libp2p_helper/stream_msg.go | 84 +++------- .../src/libp2p_helper/stream_msg_test.go | 8 +- .../src/libp2p_helper/util_test.go | 11 +- 12 files changed, 239 insertions(+), 135 deletions(-) diff --git a/src/app/libp2p_helper/src/codanet.go b/src/app/libp2p_helper/src/codanet.go index b54ed689beed..65b3c20c3528 100644 --- a/src/app/libp2p_helper/src/codanet.go +++ b/src/app/libp2p_helper/src/codanet.go @@ -234,7 +234,8 @@ type Helper struct { ConnectionManager 
*CodaConnectionManager BandwidthCounter *metrics.BandwidthCounter MsgStats *MessageStats - Seeds []peer.AddrInfo + _seeds []peer.AddrInfo + seedsMutex sync.RWMutex NodeStatus []byte HeartbeatPeer func(peer.ID) } @@ -368,6 +369,13 @@ func (h *Helper) SetGatingState(gs *CodaGatingConfig) { } } +func (h *Helper) AddSeeds(infos ...peer.AddrInfo) { + // TODO: this "_seeds" field is never read anywhere, is it needed? + h.seedsMutex.Lock() + h._seeds = append(h._seeds, infos...) + h.seedsMutex.Unlock() +} + func (gs *CodaGatingState) TrustPeer(p peer.ID) { gs.trustedPeersMutex.Lock() gs.trustedPeers[p] = struct{}{} @@ -751,7 +759,7 @@ func MakeHelper(ctx context.Context, listenOn []ma.Multiaddr, externalAddr ma.Mu ConnectionManager: connManager, BandwidthCounter: bandwidthCounter, MsgStats: &MessageStats{min: math.MaxUint64}, - Seeds: seeds, + _seeds: seeds, HeartbeatPeer: func(p peer.ID) { lanPatcher.Heartbeat(p) wanPatcher.Heartbeat(p) diff --git a/src/app/libp2p_helper/src/libp2p_helper/app.go b/src/app/libp2p_helper/src/libp2p_helper/app.go index 309fb6a22460..8352596aa0bd 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/app.go +++ b/src/app/libp2p_helper/src/libp2p_helper/app.go @@ -8,7 +8,6 @@ import ( "math" "os" "strconv" - "sync" "time" ipc "libp2p_ipc" @@ -29,14 +28,13 @@ func newApp() *app { return &app{ P2p: nil, Ctx: ctx, - Subs: make(map[uint64]subscription), - Topics: make(map[string]*pubsub.Topic), - ValidatorMutex: &sync.Mutex{}, - Validators: make(map[uint64]*validationStatus), - Streams: make(map[uint64]net.Stream), + _subs: make(map[uint64]subscription), + _topics: make(map[string]*pubsub.Topic), + _validators: make(map[uint64]*validationStatus), + _streams: make(map[uint64]net.Stream), OutChan: outChan, Out: bufio.NewWriter(os.Stdout), - AddedPeers: []peer.AddrInfo{}, + _addedPeers: []peer.AddrInfo{}, MetricsRefreshTime: time.Minute, metricsCollectionStarted: false, metricsServer: nil, @@ -64,6 +62,151 @@ func (app *app) NextId() uint64 { return 
app.counter } +func (app *app) AddPeers(infos ...peer.AddrInfo) { + app.addedPeersMutex.Lock() + app._addedPeers = append(app._addedPeers, infos...) + app.addedPeersMutex.Unlock() +} + +func (app *app) GetAddedPeers() []peer.AddrInfo { + app.addedPeersMutex.RLock() + copyOfAddedPeers := make([]peer.AddrInfo, len(app._addedPeers)) + copy(copyOfAddedPeers, app._addedPeers) + app.addedPeersMutex.RUnlock() + return copyOfAddedPeers +} + +func (app *app) ResetAddedPeers() { + app.addedPeersMutex.Lock() + app._addedPeers = nil + app.addedPeersMutex.Unlock() +} + +func (app *app) AddStream(stream net.Stream) uint64 { + streamIdx := app.NextId() + app.streamsMutex.Lock() + app._streams[streamIdx] = stream + app.streamsMutex.Unlock() + return streamIdx +} + +func (app *app) CloseStream(streamId uint64) error { + app.streamsMutex.Lock() + defer app.streamsMutex.Unlock() + if stream, ok := app._streams[streamId]; ok { + delete(app._streams, streamId) + err := stream.Close() + if err != nil { + return badp2p(err) + } + return nil + } + return badRPC(errors.New("unknown stream_idx")) +} + +func (app *app) ResetStream(streamId uint64) error { + app.streamsMutex.Lock() + defer app.streamsMutex.Unlock() + if stream, ok := app._streams[streamId]; ok { + delete(app._streams, streamId) + err := stream.Reset() + if err != nil { + return badp2p(err) + } + return nil + } + return badRPC(errors.New("unknown stream_idx")) +} + +func (app *app) StreamWrite(streamId uint64, data []byte) error { + // TODO Consider using a more fine-grained locking strategy, + // not using a global mutex to lock on a message sending + app.streamsMutex.Lock() + defer app.streamsMutex.Unlock() + if stream, ok := app._streams[streamId]; ok { + n, err := stream.Write(data) + if err != nil { + // TODO check that it's correct to error out, not repeat writing + delete(app._streams, streamId) + close_err := stream.Close() + if close_err != nil { + app.P2p.Logger.Errorf("failed to close stream %d after encountering 
write failure (%s): %s", streamId, err.Error(), close_err.Error()) + } + return wrapError(badp2p(err), fmt.Sprintf("only wrote %d out of %d bytes", n, len(data))) + } + return nil + } + return badRPC(errors.New("unknown stream_idx")) +} + +func (app *app) AddValidator() (uint64, chan pubsub.ValidationResult) { + seqno := app.NextId() + ch := make(chan pubsub.ValidationResult) + app.validatorMutex.Lock() + app._validators[seqno] = new(validationStatus) + app._validators[seqno].Completion = ch + app.validatorMutex.Unlock() + return seqno, ch +} + +func (app *app) RemoveValidator(seqno uint64) { + app.validatorMutex.Lock() + delete(app._validators, seqno) + app.validatorMutex.Unlock() +} + +func (app *app) TimeoutValidator(seqno uint64) { + now := time.Now() + app.validatorMutex.Lock() + app._validators[seqno].TimedOutAt = &now + app.validatorMutex.Unlock() +} + +func (app *app) FinishValidator(seqno uint64, finish func(st *validationStatus)) bool { + app.validatorMutex.Lock() + defer app.validatorMutex.Unlock() + if st, ok := app._validators[seqno]; ok { + finish(st) + delete(app._validators, seqno) + return true + } else { + return false + } +} + +func (app *app) AddTopic(topicName string, topic *pubsub.Topic) { + app.topicsMutex.Lock() + app._topics[topicName] = topic + app.topicsMutex.Unlock() +} + +func (app *app) GetTopic(topicName string) (*pubsub.Topic, bool) { + app.topicsMutex.RLock() + topic, has := app._topics[topicName] + app.topicsMutex.RUnlock() + return topic, has +} + +func (app *app) AddSubscription(subId uint64, sub subscription) { + app.subsMutex.Lock() + app._subs[subId] = sub + app.subsMutex.Unlock() +} + +func (app *app) CancelSubscription(subId uint64) bool { + app.subsMutex.Lock() + defer app.subsMutex.Unlock() + + if sub, ok := app._subs[subId]; ok { + sub.Sub.Cancel() + sub.Cancel() + delete(app._subs, subId) + return true + } + + return false +} + func parseMultiaddrWithID(ma multiaddr.Multiaddr, id peer.ID) (*codaPeerInfo, error) { 
ipComponent, tcpMaddr := multiaddr.SplitFirst(ma) if !(ipComponent.Protocol().Code == multiaddr.P_IP4 || ipComponent.Protocol().Code == multiaddr.P_IP6) { @@ -96,6 +239,7 @@ func addrInfoOfString(maddr string) (*peer.AddrInfo, error) { return info, nil } +// Writes a message back to the OCaml node func (app *app) writeMsg(msg *capnp.Message) { if app.NoUpcalls { return diff --git a/src/app/libp2p_helper/src/libp2p_helper/config_msg.go b/src/app/libp2p_helper/src/libp2p_helper/config_msg.go index ef9e215a5c3d..b992900149c1 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/config_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/config_msg.go @@ -36,7 +36,7 @@ func (msg BeginAdvertisingReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, needsConfigure()) } app.SetConnectionHandlers() - for _, info := range app.AddedPeers { + for _, info := range app.GetAddedPeers() { app.P2p.Logger.Debug("Trying to connect to: ", info) err := app.P2p.Host.Connect(app.Ctx, info) if err != nil { @@ -334,7 +334,7 @@ func (msg ConfigureReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, badRPC(err)) } - app.AddedPeers = append(app.AddedPeers, seeds...) + app.AddPeers(seeds...) 
directPeersMaList, err := m.DirectPeers() if err != nil { @@ -372,12 +372,12 @@ func (msg ConfigureReq) handle(app *app, seqno uint64) *capnp.Message { if err != nil { return mkRpcRespError(seqno, badRPC(err)) } - gatingConfig, err := readGatingConfig(gc, app.AddedPeers) + gatingConfig, err := readGatingConfig(gc, app.GetAddedPeers()) if err != nil { return mkRpcRespError(seqno, badRPC(err)) } if gc.CleanAddedPeers() { - app.AddedPeers = nil + app.ResetAddedPeers() } stateDir, err := m.Statedir() @@ -593,13 +593,13 @@ func (m SetGatingConfigReq) handle(app *app, seqno uint64) *capnp.Message { var gatingConfig *codanet.CodaGatingConfig gc, err := SetGatingConfigReqT(m).GatingConfig() if err == nil { - gatingConfig, err = readGatingConfig(gc, app.AddedPeers) + gatingConfig, err = readGatingConfig(gc, app.GetAddedPeers()) } if err != nil { return mkRpcRespError(seqno, badRPC(err)) } if gc.CleanAddedPeers() { - app.AddedPeers = nil + app.ResetAddedPeers() } app.P2p.SetGatingState(gatingConfig) diff --git a/src/app/libp2p_helper/src/libp2p_helper/config_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/config_msg_test.go index d5a6330492c1..674899e1cd3e 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/config_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/config_msg_test.go @@ -40,7 +40,7 @@ func TestDHTDiscovery_TwoNodes(t *testing.T) { require.NoError(t, err) appB, _ := newTestApp(t, appAInfos, true) - appB.AddedPeers = appAInfos + appB.AddPeers(appAInfos...) 
appB.NoMDNS = true // begin appB and appA's DHT advertising diff --git a/src/app/libp2p_helper/src/libp2p_helper/data.go b/src/app/libp2p_helper/src/libp2p_helper/data.go index e07f903ef02c..bf1748aaf493 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/data.go +++ b/src/app/libp2p_helper/src/libp2p_helper/data.go @@ -23,16 +23,19 @@ import ( type app struct { P2p *codanet.Helper Ctx context.Context - Subs map[uint64]subscription - Topics map[string]*pubsub.Topic - Validators map[uint64]*validationStatus - ValidatorMutex *sync.Mutex - Streams map[uint64]net.Stream - StreamsMutex sync.Mutex + _subs map[uint64]subscription + subsMutex sync.Mutex + _topics map[string]*pubsub.Topic + topicsMutex sync.RWMutex + _validators map[uint64]*validationStatus + validatorMutex sync.Mutex + _streams map[uint64]net.Stream + streamsMutex sync.Mutex Out *bufio.Writer OutChan chan *capnp.Message Bootstrapper io.Closer - AddedPeers []peer.AddrInfo + addedPeersMutex sync.RWMutex + _addedPeers []peer.AddrInfo UnsafeNoTrustIP bool MetricsRefreshTime time.Duration metricsCollectionStarted bool @@ -54,8 +57,6 @@ type app struct { type subscription struct { Sub *pubsub.Subscription - Idx uint64 - Ctx context.Context Cancel context.CancelFunc } diff --git a/src/app/libp2p_helper/src/libp2p_helper/incoming_msg.go b/src/app/libp2p_helper/src/libp2p_helper/incoming_msg.go index 38b435772791..7346b7819ac7 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/incoming_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/incoming_msg.go @@ -40,6 +40,7 @@ var pushMesssageExtractors = map[ipc.Libp2pHelperInterface_PushMessage_Which]ext ipc.Libp2pHelperInterface_PushMessage_Which_heartbeatPeer: fromHeartbeatPeerPush, } +// Handles messages coming from the OCaml process func (app *app) handleIncomingMsg(msg *ipc.Libp2pHelperInterface_Message) { if msg.HasRpcRequest() { resp, err := func() (*capnp.Message, error) { diff --git a/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go 
b/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go index a5a88fe9edd4..74daed42656f 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go @@ -40,7 +40,7 @@ func (m AddPeerReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, badRPC(err)) } - app.AddedPeers = append(app.AddedPeers, *info) + app.AddPeers(*info) app.P2p.GatingState().TrustPeer(info.ID) if app.Bootstrapper != nil { @@ -50,7 +50,7 @@ func (m AddPeerReq) handle(app *app, seqno uint64) *capnp.Message { app.P2p.Logger.Info("addPeer Trying to connect to: ", info) if AddPeerReqT(m).IsSeed() { - app.P2p.Seeds = append(app.P2p.Seeds, *info) + app.P2p.AddSeeds(*info) } err = app.P2p.Host.Connect(app.Ctx, *info) diff --git a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go index 0008a8d483aa..8ef176ae03e0 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go @@ -36,9 +36,7 @@ func (m ValidationPush) handle(app *app) { return } seqno := vid.Id() - app.ValidatorMutex.Lock() - defer app.ValidatorMutex.Unlock() - if st, ok := app.Validators[seqno]; ok { + found := app.FinishValidator(seqno, func(st *validationStatus) { res := ValidationUnknown switch ValidationPushT(m).Result() { case ipc.ValidationResult_accept: @@ -54,8 +52,9 @@ func (m ValidationPush) handle(app *app) { if st.TimedOutAt != nil { app.P2p.Logger.Errorf("validation for item %d took %d seconds", seqno, time.Now().Add(validationTimeout).Sub(*st.TimedOutAt)) } - delete(app.Validators, seqno) - } else { + }) + + if !found { app.P2p.Logger.Warnf("handleValidation: validation seqno %d unknown", seqno) } } @@ -87,12 +86,12 @@ func (m PublishReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, badRPC(err)) } - if topic, has = app.Topics[topicName]; !has { + if topic, has = app.GetTopic(topicName); 
!has { topic, err = app.P2p.Pubsub.Join(topicName) if err != nil { return mkRpcRespError(seqno, badp2p(err)) } - app.Topics[topicName] = topic + app.AddTopic(topicName, topic) } if err := topic.Publish(app.Ctx, data); err != nil { @@ -136,7 +135,7 @@ func (m SubscribeReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, badp2p(err)) } - app.Topics[topicName] = topic + app.AddTopic(topicName, topic) err = app.P2p.Pubsub.RegisterTopicValidator(topicName, func(ctx context.Context, id peer.ID, msg *pubsub.Message) pubsub.ValidationResult { app.P2p.Logger.Debugf("Received gossip message on topic %s from %s", topicName, id.Pretty()) @@ -148,12 +147,7 @@ func (m SubscribeReq) handle(app *app, seqno uint64) *capnp.Message { seenAt := time.Now() - seqno := app.NextId() - ch := make(chan pubsub.ValidationResult) - app.ValidatorMutex.Lock() - app.Validators[seqno] = new(validationStatus) - app.Validators[seqno].Completion = ch - app.ValidatorMutex.Unlock() + seqno, ch := app.AddValidator() app.P2p.Logger.Info("validating a new pubsub message ...") @@ -161,17 +155,14 @@ func (m SubscribeReq) handle(app *app, seqno uint64) *capnp.Message { if err != nil && !app.UnsafeNoTrustIP { app.P2p.Logger.Errorf("failed to connect to peer %s that just sent us a pubsub message, dropping it", peer.Encode(id)) - app.ValidatorMutex.Lock() - defer app.ValidatorMutex.Unlock() - delete(app.Validators, seqno) + app.RemoveValidator(seqno) return pubsub.ValidationIgnore } deadline, ok := ctx.Deadline() if !ok { app.P2p.Logger.Errorf("no deadline set on validation context") - defer app.ValidatorMutex.Unlock() - delete(app.Validators, seqno) + app.RemoveValidator(seqno) return pubsub.ValidationIgnore } app.writeMsg(mkGossipReceivedUpcall(sender, deadline, seenAt, msg.Data, seqno, subId)) @@ -187,12 +178,7 @@ func (m SubscribeReq) handle(app *app, seqno uint64) *capnp.Message { validationTimeoutMetric.Inc() - app.ValidatorMutex.Lock() - - now := time.Now() - 
app.Validators[seqno].TimedOutAt = &now - - app.ValidatorMutex.Unlock() + app.TimeoutValidator(seqno) if app.UnsafeNoTrustIP { app.P2p.Logger.Info("validated anyway!") @@ -228,12 +214,11 @@ func (m SubscribeReq) handle(app *app, seqno uint64) *capnp.Message { } ctx, cancel := context.WithCancel(app.Ctx) - app.Subs[subId] = subscription{ + app.AddSubscription(subId, subscription{ Sub: sub, - Idx: subId, - Ctx: ctx, Cancel: cancel, - } + }) + go func() { for { _, err = sub.Next(ctx) @@ -268,14 +253,12 @@ func (m UnsubscribeReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, badRPC(err)) } subId := subId_.Id() - if sub, ok := app.Subs[subId]; ok { - sub.Sub.Cancel() - sub.Cancel() - delete(app.Subs, subId) + if app.CancelSubscription(subId) { return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { _, err := m.NewUnsubscribe() panicOnErr(err) }) + } else { + return mkRpcRespError(seqno, badRPC(errors.New("subscription not found"))) } - return mkRpcRespError(seqno, badRPC(errors.New("subscription not found"))) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go index 1dd1d4dbc99a..6d8ae65579f3 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go @@ -27,7 +27,7 @@ func testPublishDo(t *testing.T, app *app, topic string, data []byte, rpcSeqno u _, err = respSuccess.Publish() require.NoError(t, err) - _, has := app.Topics[topic] + _, has := app._topics[topic] require.True(t, has) } @@ -55,9 +55,9 @@ func testSubscribeDo(t *testing.T, app *app, topic string, subId uint64, rpcSeqn _, err = respSuccess.Subscribe() require.NoError(t, err) - _, has := app.Topics[topic] + _, has := app._topics[topic] require.True(t, has) - _, has = app.Subs[subId] + _, has = app._subs[subId] require.True(t, has) } @@ -97,7 +97,7 @@ func TestUnsubscribe(t *testing.T) { _, 
err = respSuccess.Unsubscribe() require.NoError(t, err) - _, has := testApp.Subs[idx] + _, has := testApp._subs[idx] require.False(t, has) } @@ -121,7 +121,7 @@ func TestValidationPush(t *testing.T) { status := &validationStatus{ Completion: make(chan pubsub.ValidationResult), } - testApp.Validators[seqno] = status + testApp._validators[seqno] = status _, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) require.NoError(t, err) m, err := ipc.NewRootLibp2pHelperInterface_Validation(seg) @@ -133,7 +133,7 @@ func TestValidationPush(t *testing.T) { require.NoError(t, err) result := <-status.Completion require.Equal(t, pubsubValResults[i], result) - _, has := testApp.Validators[seqno] + _, has := testApp._validators[seqno] require.False(t, has) } } diff --git a/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go b/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go index bc4cc9ad827e..c25456db3c73 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go @@ -2,13 +2,11 @@ package main import ( "context" - "fmt" "time" ipc "libp2p_ipc" capnp "capnproto.org/go/capnp/v3" - "github.com/go-errors/errors" net "github.com/libp2p/go-libp2p/core/network" peer "github.com/libp2p/go-libp2p/core/peer" protocol "github.com/libp2p/go-libp2p/core/protocol" @@ -35,12 +33,9 @@ func (m AddStreamHandlerReq) handle(app *app, seqno uint64) *capnp.Message { app.P2p.Logger.Errorf("failed to parse remote connection information, silently dropping stream: %s", err.Error()) return } - streamIdx := app.NextId() - app.StreamsMutex.Lock() - defer app.StreamsMutex.Unlock() - app.Streams[streamIdx] = stream - app.writeMsg(mkIncomingStreamUpcall(peerinfo, streamIdx, protocolId)) + streamIdx := app.AddStream(stream) handleStreamReads(app, stream, streamIdx) + app.writeMsg(mkIncomingStreamUpcall(peerinfo, streamIdx, protocolId)) }) return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { @@ -65,20 
+60,14 @@ func (m CloseStreamReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, badRPC(err)) } streamId := sid.Id() - app.StreamsMutex.Lock() - defer app.StreamsMutex.Unlock() - if stream, ok := app.Streams[streamId]; ok { - delete(app.Streams, streamId) - err := stream.Close() - if err != nil { - return mkRpcRespError(seqno, badp2p(err)) - } - return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { - _, err := m.NewCloseStream() - panicOnErr(err) - }) + err = app.CloseStream(streamId) + if err != nil { + return mkRpcRespError(seqno, err) } - return mkRpcRespError(seqno, badRPC(errors.New("unknown stream_idx"))) + return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { + _, err := m.NewCloseStream() + panicOnErr(err) + }) } type OpenStreamReqT = ipc.Libp2pHelperInterface_OpenStream_Request @@ -93,7 +82,6 @@ func (m OpenStreamReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, needsConfigure()) } - streamIdx := app.NextId() var peerDecoded peer.ID var protocolId string err := func() error { @@ -133,15 +121,14 @@ func (m OpenStreamReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, badp2p(err)) } - app.StreamsMutex.Lock() - defer app.StreamsMutex.Unlock() - app.Streams[streamIdx] = stream + streamIdx := app.AddStream(stream) go func() { // FIXME HACK: allow time for the openStreamResult to get printed before we start inserting stream events time.Sleep(250 * time.Millisecond) // Note: It is _very_ important that we call handleStreamReads here -- this is how the "caller" side of the stream starts listening to the responses from the RPCs. Do not remove. 
handleStreamReads(app, stream, streamIdx) }() + return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { resp, err := m.NewOpenStream() panicOnErr(err) @@ -193,21 +180,14 @@ func (m ResetStreamReq) handle(app *app, seqno uint64) *capnp.Message { return mkRpcRespError(seqno, badRPC(err)) } streamId := sid.Id() - app.StreamsMutex.Lock() - if stream, ok := app.Streams[streamId]; ok { - delete(app.Streams, streamId) - app.StreamsMutex.Unlock() - err := stream.Reset() - if err != nil { - return mkRpcRespError(seqno, badp2p(err)) - } - return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { - _, err := m.NewResetStream() - panicOnErr(err) - }) + err = app.ResetStream(streamId) + if err != nil { + return mkRpcRespError(seqno, err) } - app.StreamsMutex.Unlock() - return mkRpcRespError(seqno, badRPC(errors.New("unknown stream_idx"))) + return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { + _, err := m.NewResetStream() + panicOnErr(err) + }) } type SendStreamReqT = ipc.Libp2pHelperInterface_SendStream_Request @@ -235,26 +215,14 @@ func (m SendStreamReq) handle(app *app, seqno uint64) *capnp.Message { } streamId := sid.Id() - // TODO Consider using a more fine-grained locking strategy, - // not using a global mutex to lock on a message sending - app.StreamsMutex.Lock() - defer app.StreamsMutex.Unlock() - if stream, ok := app.Streams[streamId]; ok { - n, err := stream.Write(data) - if err != nil { - // TODO check that it's correct to error out, not repeat writing - delete(app.Streams, streamId) - close_err := stream.Close() - if close_err != nil { - app.P2p.Logger.Errorf("failed to close stream %d after encountering write failure (%s): %s", streamId, err.Error(), close_err.Error()) - } + err = app.StreamWrite(streamId, data) - return mkRpcRespError(seqno, wrapError(badp2p(err), fmt.Sprintf("only wrote %d out of %d bytes", n, len(data)))) - } - return mkRpcRespSuccess(seqno, func(m 
*ipc.Libp2pHelperInterface_RpcResponseSuccess) { - _, err := m.NewSendStream() - panicOnErr(err) - }) + if err != nil { + return mkRpcRespError(seqno, err) } - return mkRpcRespError(seqno, badRPC(errors.New("unknown stream_idx"))) + + return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { + _, err := m.NewSendStream() + panicOnErr(err) + }) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go index b21b6d3f2632..d3621a2f7881 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go @@ -76,7 +76,7 @@ func testOpenStreamDo(t *testing.T, appA *app, appBHost host.Host, appBPort uint require.Equal(t, appA.counter, respStreamId) - _, has := appA.Streams[respStreamId] + _, has := appA._streams[respStreamId] require.True(t, has) return respStreamId @@ -110,7 +110,7 @@ func testCloseStreamDo(t *testing.T, app *app, streamId uint64, rpcSeqno uint64) _, err = respSuccess.CloseStream() require.NoError(t, err) - _, has := app.Streams[streamId] + _, has := app._streams[streamId] require.False(t, has) } @@ -173,7 +173,7 @@ func testResetStreamDo(t *testing.T, app *app, streamId uint64, rpcSeqno uint64) _, err = respSuccess.ResetStream() require.NoError(t, err) - _, has := app.Streams[streamId] + _, has := app._streams[streamId] require.False(t, has) } @@ -201,7 +201,7 @@ func testSendStreamDo(t *testing.T, app *app, streamId uint64, msgBytes []byte, _, err = respSuccess.SendStream() require.NoError(t, err) - _, has := app.Streams[streamId] + _, has := app._streams[streamId] require.True(t, has) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/util_test.go b/src/app/libp2p_helper/src/libp2p_helper/util_test.go index b3e9530259de..4aa7bb3db952 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/util_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/util_test.go @@ -89,12 +89,11 @@ 
func newTestAppWithMaxConnsAndCtxAndGrace(t *testing.T, privkey crypto.PrivKey, return &app{ P2p: helper, Ctx: ctx, - Subs: make(map[uint64]subscription), - Topics: make(map[string]*pubsub.Topic), - ValidatorMutex: &sync.Mutex{}, - Validators: make(map[uint64]*validationStatus), - Streams: make(map[uint64]net.Stream), - AddedPeers: make([]peer.AddrInfo, 0, 512), + _subs: make(map[uint64]subscription), + _topics: make(map[string]*pubsub.Topic), + _validators: make(map[uint64]*validationStatus), + _streams: make(map[uint64]net.Stream), + _addedPeers: make([]peer.AddrInfo, 0, 512), OutChan: outChan, MetricsRefreshTime: time.Second * 2, NoUpcalls: noUpcalls, From 526066c9ce5e257aa4c27e94c9a376a84e94cb12 Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Wed, 15 Nov 2023 17:41:07 -0300 Subject: [PATCH 016/119] Use defers for mutex unlocks --- .../libp2p_helper/src/libp2p_helper/app.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/app/libp2p_helper/src/libp2p_helper/app.go b/src/app/libp2p_helper/src/libp2p_helper/app.go index 8352596aa0bd..d5679953026d 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/app.go +++ b/src/app/libp2p_helper/src/libp2p_helper/app.go @@ -64,29 +64,29 @@ func (app *app) NextId() uint64 { func (app *app) AddPeers(infos ...peer.AddrInfo) { app.addedPeersMutex.Lock() + defer app.addedPeersMutex.Unlock() app._addedPeers = append(app._addedPeers, infos...) 
- app.addedPeersMutex.Unlock() } func (app *app) GetAddedPeers() []peer.AddrInfo { app.addedPeersMutex.RLock() + defer app.addedPeersMutex.RUnlock() copyOfAddedPeers := make([]peer.AddrInfo, len(app._addedPeers)) copy(copyOfAddedPeers, app._addedPeers) - app.addedPeersMutex.RUnlock() return copyOfAddedPeers } func (app *app) ResetAddedPeers() { app.addedPeersMutex.Lock() + defer app.addedPeersMutex.Unlock() app._addedPeers = nil - app.addedPeersMutex.Unlock() } func (app *app) AddStream(stream net.Stream) uint64 { streamIdx := app.NextId() app.streamsMutex.Lock() + defer app.streamsMutex.Unlock() app._streams[streamIdx] = stream - app.streamsMutex.Unlock() return streamIdx } @@ -143,23 +143,23 @@ func (app *app) AddValidator() (uint64, chan pubsub.ValidationResult) { seqno := app.NextId() ch := make(chan pubsub.ValidationResult) app.validatorMutex.Lock() + defer app.validatorMutex.Unlock() app._validators[seqno] = new(validationStatus) app._validators[seqno].Completion = ch - app.validatorMutex.Unlock() return seqno, ch } func (app *app) RemoveValidator(seqno uint64) { app.validatorMutex.Lock() + defer app.validatorMutex.Unlock() delete(app._validators, seqno) - app.validatorMutex.Unlock() } func (app *app) TimeoutValidator(seqno uint64) { now := time.Now() app.validatorMutex.Lock() + defer app.validatorMutex.Unlock() app._validators[seqno].TimedOutAt = &now - app.validatorMutex.Unlock() } func (app *app) FinishValidator(seqno uint64, finish func(st *validationStatus)) bool { @@ -176,21 +176,21 @@ func (app *app) FinishValidator(seqno uint64, finish func(st *validationStatus)) func (app *app) AddTopic(topicName string, topic *pubsub.Topic) { app.topicsMutex.Lock() + defer app.topicsMutex.Unlock() app._topics[topicName] = topic - app.topicsMutex.Unlock() } func (app *app) GetTopic(topicName string) (*pubsub.Topic, bool) { app.topicsMutex.RLock() + defer app.topicsMutex.RUnlock() topic, has := app._topics[topicName] - app.topicsMutex.RUnlock() return topic, has } 
func (app *app) AddSubscription(subId uint64, sub subscription) { app.subsMutex.Lock() + defer app.subsMutex.Unlock() app._subs[subId] = sub - app.subsMutex.Unlock() } func (app *app) CancelSubscription(subId uint64) bool { From 5ec033426e7764c464b717076e520ac287ca007a Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 16 Nov 2023 11:46:24 +0100 Subject: [PATCH 017/119] fixup! Fix go read/write races --- src/app/libp2p_helper/src/codanet.go | 87 ++++++++++++------- src/app/libp2p_helper/src/codanet_test.go | 4 +- .../src/libp2p_helper/main_test.go | 4 +- .../src/libp2p_helper/util_test.go | 2 +- 4 files changed, 61 insertions(+), 36 deletions(-) diff --git a/src/app/libp2p_helper/src/codanet.go b/src/app/libp2p_helper/src/codanet.go index 65b3c20c3528..113cecec954a 100644 --- a/src/app/libp2p_helper/src/codanet.go +++ b/src/app/libp2p_helper/src/codanet.go @@ -90,9 +90,9 @@ func isPrivateAddr(addr ma.Multiaddr) bool { type CodaConnectionManager struct { p2pManager *p2pconnmgr.BasicConnMgr - onConnectMutex sync.Mutex + onConnectMutex sync.RWMutex onConnect func(network.Network, network.Conn) - onDisconnectMutex sync.Mutex + onDisconnectMutex sync.RWMutex onDisconnect func(network.Network, network.Conn) // protectedMirror is a map of protected peer ids/tags, mirroring the structure in // BasicConnMgr which is not accessible from CodaConnectionManager @@ -102,22 +102,22 @@ type CodaConnectionManager struct { func (cm *CodaConnectionManager) AddOnConnectHandler(f func(network.Network, network.Conn)) { cm.onConnectMutex.Lock() + defer cm.onConnectMutex.Unlock() prevOnConnect := cm.onConnect cm.onConnect = func(net network.Network, c network.Conn) { prevOnConnect(net, c) f(net, c) } - cm.onConnectMutex.Unlock() } func (cm *CodaConnectionManager) AddOnDisconnectHandler(f func(network.Network, network.Conn)) { cm.onDisconnectMutex.Lock() + defer cm.onDisconnectMutex.Unlock() prevOnDisconnect := cm.onDisconnect cm.onDisconnect = func(net network.Network, c network.Conn) { 
prevOnDisconnect(net, c) f(net, c) } - cm.onDisconnectMutex.Unlock() } func newCodaConnectionManager(minConnections, maxConnections int, grace time.Duration) (*CodaConnectionManager, error) { @@ -197,18 +197,27 @@ func (cm *CodaConnectionManager) Listen(net network.Network, addr ma.Multiaddr) func (cm *CodaConnectionManager) ListenClose(net network.Network, addr ma.Multiaddr) { cm.p2pManager.Notifee().ListenClose(net, addr) } + +func (cm *CodaConnectionManager) onConnectHandler() func(net network.Network, c network.Conn) { + cm.onConnectMutex.RLock() + defer cm.onConnectMutex.RUnlock() + return cm.onConnect +} + func (cm *CodaConnectionManager) Connected(net network.Network, c network.Conn) { logger.Debugf("%s connected to %s", c.LocalPeer(), c.RemotePeer()) - cm.onConnectMutex.Lock() - cm.onConnect(net, c) - cm.onConnectMutex.Unlock() + cm.onConnectHandler()(net, c) cm.p2pManager.Notifee().Connected(net, c) } +func (cm *CodaConnectionManager) onDisconnectHandler() func(net network.Network, c network.Conn) { + cm.onDisconnectMutex.RLock() + defer cm.onDisconnectMutex.RUnlock() + return cm.onDisconnect +} + func (cm *CodaConnectionManager) Disconnected(net network.Network, c network.Conn) { - cm.onDisconnectMutex.Lock() - cm.onDisconnect(net, c) - cm.onDisconnectMutex.Unlock() + cm.onDisconnectHandler()(net, c) cm.p2pManager.Notifee().Disconnected(net, c) } @@ -284,10 +293,28 @@ func (ms *MessageStats) GetStats() *safeStats { } } -func (h *Helper) ResetGatingConfigTrustedAddrFilters() { +func (h *Helper) SetBannedPeers(newP map[peer.ID]struct{}) { + h.gatingState.bannedPeersMutex.Lock() + defer h.gatingState.bannedPeersMutex.Unlock() + h.gatingState.bannedPeers = newP +} + +func (h *Helper) SetTrustedPeers(newP map[peer.ID]struct{}) { + h.gatingState.trustedPeersMutex.Lock() + defer h.gatingState.trustedPeersMutex.Unlock() + h.gatingState.trustedPeers = newP +} + +func (h *Helper) SetTrustedAddrFilters(newF *ma.Filters) { h.gatingState.trustedAddrFiltersMutex.Lock() 
- h.gatingState.trustedAddrFilters = ma.NewFilters() - h.gatingState.trustedAddrFiltersMutex.Unlock() + defer h.gatingState.trustedAddrFiltersMutex.Unlock() + h.gatingState.trustedAddrFilters = newF +} + +func (h *Helper) SetBannedAddrFilters(newF *ma.Filters) { + h.gatingState.bannedAddrFiltersMutex.Lock() + defer h.gatingState.bannedAddrFiltersMutex.Unlock() + h.gatingState.bannedAddrFilters = newF } // this type implements the ConnectionGating interface @@ -296,13 +323,13 @@ func (h *Helper) ResetGatingConfigTrustedAddrFilters() { type CodaGatingState struct { logger logging.EventLogger KnownPrivateAddrFilters *ma.Filters - bannedAddrFiltersMutex sync.Mutex + bannedAddrFiltersMutex sync.RWMutex bannedAddrFilters *ma.Filters - trustedAddrFiltersMutex sync.Mutex + trustedAddrFiltersMutex sync.RWMutex trustedAddrFilters *ma.Filters - bannedPeersMutex sync.Mutex + bannedPeersMutex sync.RWMutex bannedPeers map[peer.ID]struct{} - trustedPeersMutex sync.Mutex + trustedPeersMutex sync.RWMutex trustedPeers map[peer.ID]struct{} } @@ -352,10 +379,10 @@ func (h *Helper) GatingState() *CodaGatingState { } func (h *Helper) SetGatingState(gs *CodaGatingConfig) { - h.gatingState.trustedPeers = gs.TrustedPeers - h.gatingState.bannedPeers = gs.BannedPeers - h.gatingState.trustedAddrFilters = gs.TrustedAddrFilters - h.gatingState.bannedAddrFilters = gs.BannedAddrFilters + h.SetTrustedPeers(gs.TrustedPeers) + h.SetBannedPeers(gs.BannedPeers) + h.SetTrustedAddrFilters(gs.TrustedAddrFilters) + h.SetBannedAddrFilters(gs.BannedAddrFilters) for _, c := range h.Host.Network().Conns() { pid := c.RemotePeer() maddr := c.RemoteMultiaddr() @@ -378,8 +405,8 @@ func (h *Helper) AddSeeds(infos ...peer.AddrInfo) { func (gs *CodaGatingState) TrustPeer(p peer.ID) { gs.trustedPeersMutex.Lock() + defer gs.trustedPeersMutex.Unlock() gs.trustedPeers[p] = struct{}{} - gs.trustedPeersMutex.Unlock() } func (gs *CodaGatingState) MarkPrivateAddrAsKnown(addr ma.Multiaddr) { @@ -427,9 +454,9 @@ func (c 
connectionAllowance) isDeny() bool { } func (gs *CodaGatingState) checkPeerTrusted(p peer.ID) connectionAllowance { - gs.trustedPeersMutex.Lock() + gs.trustedPeersMutex.RLock() + defer gs.trustedPeersMutex.RUnlock() _, isTrusted := gs.trustedPeers[p] - gs.trustedPeersMutex.Unlock() if isTrusted { return Accept } @@ -437,9 +464,9 @@ func (gs *CodaGatingState) checkPeerTrusted(p peer.ID) connectionAllowance { } func (gs *CodaGatingState) checkPeerBanned(p peer.ID) connectionAllowance { - gs.bannedPeersMutex.Lock() + gs.bannedPeersMutex.RLock() + defer gs.bannedPeersMutex.RUnlock() _, isBanned := gs.bannedPeers[p] - gs.bannedPeersMutex.Unlock() if isBanned { return DenyBannedPeer } @@ -474,8 +501,8 @@ func (gs *CodaGatingState) checkAllowedPeer(p peer.ID) connectionAllowance { } func (gs *CodaGatingState) checkAddrTrusted(addr ma.Multiaddr) connectionAllowance { - gs.trustedAddrFiltersMutex.Lock() - defer gs.trustedAddrFiltersMutex.Unlock() + gs.trustedAddrFiltersMutex.RLock() + defer gs.trustedAddrFiltersMutex.RUnlock() if !gs.trustedAddrFilters.AddrBlocked(addr) { return Accept } @@ -483,8 +510,8 @@ func (gs *CodaGatingState) checkAddrTrusted(addr ma.Multiaddr) connectionAllowan } func (gs *CodaGatingState) checkAddrBanned(addr ma.Multiaddr) connectionAllowance { - gs.bannedAddrFiltersMutex.Lock() - defer gs.bannedAddrFiltersMutex.Unlock() + gs.bannedAddrFiltersMutex.RLock() + defer gs.bannedAddrFiltersMutex.RUnlock() if gs.bannedAddrFilters.AddrBlocked(addr) { return DenyBannedAddress } diff --git a/src/app/libp2p_helper/src/codanet_test.go b/src/app/libp2p_helper/src/codanet_test.go index a1404af6e112..669467e559b5 100644 --- a/src/app/libp2p_helper/src/codanet_test.go +++ b/src/app/libp2p_helper/src/codanet_test.go @@ -37,9 +37,7 @@ func TestTrustedPrivateConnectionGating(t *testing.T) { allowed := gs.InterceptAddrDial(testInfo.ID, testMa) require.False(t, allowed) - gs.trustedPeersMutex.Lock() - gs.trustedPeers[testInfo.ID] = struct{}{} - 
gs.trustedPeersMutex.Unlock() + gs.TrustPeer(testInfo.ID) allowed = gs.InterceptAddrDial(testInfo.ID, testMa) require.True(t, allowed) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/main_test.go b/src/app/libp2p_helper/src/libp2p_helper/main_test.go index e2ca0b67a332..2c8c67bdf4b2 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/main_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/main_test.go @@ -266,10 +266,10 @@ func TestLibp2pMetrics(t *testing.T) { var streamIdx uint64 = 0 var streamMutex sync.Mutex handler := func(stream net.Stream) { - streamMutex.Lock() handleStreamReads(appB, stream, streamIdx) + streamMutex.Lock() + defer streamMutex.Unlock() streamIdx++ - streamMutex.Unlock() } appB.P2p.Host.SetStreamHandler(testProtocol, handler) diff --git a/src/app/libp2p_helper/src/libp2p_helper/util_test.go b/src/app/libp2p_helper/src/libp2p_helper/util_test.go index 4aa7bb3db952..0ea55f729a02 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/util_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/util_test.go @@ -74,7 +74,7 @@ func newTestAppWithMaxConnsAndCtxAndGrace(t *testing.T, privkey crypto.PrivKey, ) require.NoError(t, err) - helper.ResetGatingConfigTrustedAddrFilters() + helper.SetTrustedAddrFilters(ma.NewFilters()) helper.Host.SetStreamHandler(testProtocol, testStreamHandler) t.Cleanup(func() { From 036464d98d2d12116830e490c6cb00d4a9d5eda3 Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 16 Nov 2023 12:56:15 +0100 Subject: [PATCH 018/119] Remove hack in openStream RPC Problem: RPC protocol requires response to stream open RPC to arrive before any message from the stream. This was implemented with use of an ugly hack. Solution: remove hack, introduce notion of after-write handler in to be executed after the rpc response is written to output. 
--- .../libp2p_helper/src/libp2p_helper/app.go | 2 +- .../src/libp2p_helper/bandwidth_msg.go | 2 +- .../src/libp2p_helper/bitswap_msg.go | 4 +-- .../src/libp2p_helper/config_msg.go | 14 +++++----- .../src/libp2p_helper/config_msg_test.go | 12 ++++----- .../src/libp2p_helper/incoming_msg.go | 18 ++++++++----- .../libp2p_helper/src/libp2p_helper/msg.go | 19 +++++++++++--- .../src/libp2p_helper/peer_msg.go | 6 ++--- .../src/libp2p_helper/peer_msg_test.go | 6 ++--- .../src/libp2p_helper/pubsub_msg.go | 6 ++--- .../src/libp2p_helper/pubsub_msg_test.go | 6 ++--- .../src/libp2p_helper/stream_msg.go | 26 ++++++++----------- .../src/libp2p_helper/stream_msg_test.go | 18 ++++++------- .../src/libp2p_helper/util_test.go | 3 ++- 14 files changed, 78 insertions(+), 64 deletions(-) diff --git a/src/app/libp2p_helper/src/libp2p_helper/app.go b/src/app/libp2p_helper/src/libp2p_helper/app.go index d5679953026d..369cec9dc204 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/app.go +++ b/src/app/libp2p_helper/src/libp2p_helper/app.go @@ -23,7 +23,7 @@ import ( ) func newApp() *app { - outChan := make(chan *capnp.Message, 1<<12) // 4kb + outChan := make(chan *capnp.Message, 1<<12) // 4096 messages stacked ctx := context.Background() return &app{ P2p: nil, diff --git a/src/app/libp2p_helper/src/libp2p_helper/bandwidth_msg.go b/src/app/libp2p_helper/src/libp2p_helper/bandwidth_msg.go index 7d8209ce68ee..cee135d24e64 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/bandwidth_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/bandwidth_msg.go @@ -17,7 +17,7 @@ func fromBandwidthInfoReq(req ipcRpcRequest) (rpcRequest, error) { return BandwidthInfoReq(i), err } -func (msg BandwidthInfoReq) handle(app *app, seqno uint64) *capnp.Message { +func (msg BandwidthInfoReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/bitswap_msg.go 
b/src/app/libp2p_helper/src/libp2p_helper/bitswap_msg.go index 741f5a4b4c17..ab6f18ec1401 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/bitswap_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/bitswap_msg.go @@ -97,7 +97,7 @@ func fromTestDecodeBitswapBlocksReq(req ipcRpcRequest) (rpcRequest, error) { return TestDecodeBitswapBlocksReq(i), err } -func (m TestDecodeBitswapBlocksReq) handle(app *app, seqno uint64) *capnp.Message { +func (m TestDecodeBitswapBlocksReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { blocks, err := TestDecodeBitswapBlocksReqT(m).Blocks() if err != nil { return mkRpcRespError(seqno, badRPC(err)) @@ -156,7 +156,7 @@ func fromTestEncodeBitswapBlocksReq(req ipcRpcRequest) (rpcRequest, error) { return TestEncodeBitswapBlocksReq(i), err } -func (m TestEncodeBitswapBlocksReq) handle(app *app, seqno uint64) *capnp.Message { +func (m TestEncodeBitswapBlocksReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { mr := TestEncodeBitswapBlocksReqT(m) data, err := mr.Data() diff --git a/src/app/libp2p_helper/src/libp2p_helper/config_msg.go b/src/app/libp2p_helper/src/libp2p_helper/config_msg.go index b992900149c1..5ea9ee4d4250 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/config_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/config_msg.go @@ -31,7 +31,7 @@ func fromBeginAdvertisingReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.BeginAdvertising() return BeginAdvertisingReq(i), err } -func (msg BeginAdvertisingReq) handle(app *app, seqno uint64) *capnp.Message { +func (msg BeginAdvertisingReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -293,7 +293,7 @@ func fromConfigureReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.Configure() return ConfigureReq(i), err } -func (msg ConfigureReq) handle(app *app, seqno uint64) *capnp.Message { +func (msg ConfigureReq) handle(app *app, seqno uint64) (*capnp.Message, 
func()) { m, err := ConfigureReqT(msg).Config() if err != nil { return mkRpcRespError(seqno, badRPC(err)) @@ -487,7 +487,7 @@ func fromGetListeningAddrsReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.GetListeningAddrs() return GetListeningAddrsReq(i), err } -func (msg GetListeningAddrsReq) handle(app *app, seqno uint64) *capnp.Message { +func (msg GetListeningAddrsReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -508,7 +508,7 @@ func fromGenerateKeypairReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.GenerateKeypair() return GenerateKeypairReq(i), err } -func (msg GenerateKeypairReq) handle(app *app, seqno uint64) *capnp.Message { +func (msg GenerateKeypairReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { privk, pubk, err := crypto.GenerateEd25519Key(cryptorand.Reader) if err != nil { return mkRpcRespError(seqno, badp2p(err)) @@ -548,7 +548,7 @@ func fromListenReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.Listen() return ListenReq(i), err } -func (m ListenReq) handle(app *app, seqno uint64) *capnp.Message { +func (m ListenReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -586,7 +586,7 @@ func fromSetGatingConfigReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.SetGatingConfig() return SetGatingConfigReq(i), err } -func (m SetGatingConfigReq) handle(app *app, seqno uint64) *capnp.Message { +func (m SetGatingConfigReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -616,7 +616,7 @@ func fromSetNodeStatusReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.SetNodeStatus() return SetNodeStatusReq(i), err } -func (m SetNodeStatusReq) handle(app *app, seqno uint64) *capnp.Message { +func (m SetNodeStatusReq) handle(app *app, seqno uint64) (*capnp.Message, 
func()) { status, err := SetNodeStatusReqT(m).Status() if err != nil { return mkRpcRespError(seqno, badRPC(err)) diff --git a/src/app/libp2p_helper/src/libp2p_helper/config_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/config_msg_test.go index 674899e1cd3e..2b8070932ae5 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/config_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/config_msg_test.go @@ -190,7 +190,7 @@ func TestConfigure(t *testing.T) { require.NoError(t, err) gc.SetIsolate(false) - resMsg := ConfigureReq(m).handle(testApp, 239) + resMsg, _ := ConfigureReq(m).handle(testApp, 239) require.NoError(t, err) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "configure") require.Equal(t, seqno, uint64(239)) @@ -206,7 +206,7 @@ func TestGenerateKeypair(t *testing.T) { require.NoError(t, err) testApp, _ := newTestApp(t, nil, true) - resMsg := GenerateKeypairReq(m).handle(testApp, 7839) + resMsg, _ := GenerateKeypairReq(m).handle(testApp, 7839) require.NoError(t, err) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "generateKeypair") require.Equal(t, seqno, uint64(7839)) @@ -239,7 +239,7 @@ func TestGetListeningAddrs(t *testing.T) { m, err := ipc.NewRootLibp2pHelperInterface_GetListeningAddrs_Request(seg) require.NoError(t, err) var mRpcSeqno uint64 = 1024 - resMsg := GetListeningAddrsReq(m).handle(testApp, mRpcSeqno) + resMsg, _ := GetListeningAddrsReq(m).handle(testApp, mRpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "getListeningAddrs") require.Equal(t, seqno, mRpcSeqno) require.True(t, respSuccess.HasGetListeningAddrs()) @@ -265,7 +265,7 @@ func TestListen(t *testing.T) { require.NoError(t, iface.SetRepresentation(addrStr)) require.NoError(t, err) - resMsg := ListenReq(m).handle(testApp, 1239) + resMsg, _ := ListenReq(m).handle(testApp, 1239) require.NoError(t, err) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "listen") require.Equal(t, seqno, uint64(1239)) @@ -316,7 +316,7 @@ func 
setGatingConfigImpl(t *testing.T, app *app, allowedIps, allowedIds, bannedI gc.SetIsolate(false) var mRpcSeqno uint64 = 2003 - resMsg := SetGatingConfigReq(m).handle(app, mRpcSeqno) + resMsg, _ := SetGatingConfigReq(m).handle(app, mRpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "setGatingConfig") require.Equal(t, seqno, mRpcSeqno) require.True(t, respSuccess.HasSetGatingConfig()) @@ -369,7 +369,7 @@ func TestSetNodeStatus(t *testing.T) { testStatus := []byte("test_node_status") require.NoError(t, m.SetStatus(testStatus)) - resMsg := SetNodeStatusReq(m).handle(testApp, 11239) + resMsg, _ := SetNodeStatusReq(m).handle(testApp, 11239) require.NoError(t, err) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "setNodeStatus") require.Equal(t, seqno, uint64(11239)) diff --git a/src/app/libp2p_helper/src/libp2p_helper/incoming_msg.go b/src/app/libp2p_helper/src/libp2p_helper/incoming_msg.go index 7346b7819ac7..a4472c443c44 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/incoming_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/incoming_msg.go @@ -43,32 +43,36 @@ var pushMesssageExtractors = map[ipc.Libp2pHelperInterface_PushMessage_Which]ext // Handles messages coming from the OCaml process func (app *app) handleIncomingMsg(msg *ipc.Libp2pHelperInterface_Message) { if msg.HasRpcRequest() { - resp, err := func() (*capnp.Message, error) { + resp, afterWriteHandler, err := func() (*capnp.Message, func(), error) { req, err := msg.RpcRequest() if err != nil { - return nil, err + return nil, nil, err } h, err := req.Header() if err != nil { - return nil, err + return nil, nil, err } seqnoO, err := h.SequenceNumber() if err != nil { - return nil, err + return nil, nil, err } seqno := seqnoO.Seqno() extractor, foundHandler := rpcRequestExtractors[req.Which()] if !foundHandler { - return nil, errors.New("Received rpc message of an unknown type") + return nil, nil, errors.New("Received rpc message of an unknown type") } req2, err := 
extractor(req) if err != nil { - return nil, err + return nil, nil, err } - return req2.handle(app, seqno), nil + resp, afterWriteHandler := req2.handle(app, seqno) + return resp, afterWriteHandler, nil }() if err == nil { app.writeMsg(resp) + if afterWriteHandler != nil { + afterWriteHandler() + } } else { app.P2p.Logger.Errorf("Failed to process rpc message: %s", err) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/msg.go b/src/app/libp2p_helper/src/libp2p_helper/msg.go index 6acf6b3a2803..053bbd640626 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/msg.go @@ -29,7 +29,12 @@ type extractPushMessage = func(ipcPushMessage) (pushMessage, error) type ipcRpcRequest = ipc.Libp2pHelperInterface_RpcRequest type rpcRequest interface { - handle(app *app, seqno uint64) *capnp.Message + // Handles rpc request and returns response and a function to be called + // immediately after writing response to the output stream + // + // Callback is needed in some cases to make sure response is written + // before some other messages might get written to the output stream + handle(app *app, seqno uint64) (*capnp.Message, func()) } type extractRequest = func(ipcRpcRequest) (rpcRequest, error) @@ -207,7 +212,7 @@ func setNanoTime(ns *ipc.UnixNano, t time.Time) { ns.SetNanoSec(t.UnixNano()) } -func mkRpcRespError(seqno uint64, rpcRespErr error) *capnp.Message { +func mkRpcRespErrorNoFunc(seqno uint64, rpcRespErr error) *capnp.Message { if rpcRespErr == nil { panic("mkRpcRespError: nil error") } @@ -228,7 +233,11 @@ func mkRpcRespError(seqno uint64, rpcRespErr error) *capnp.Message { }) } -func mkRpcRespSuccess(seqno uint64, f func(*ipc.Libp2pHelperInterface_RpcResponseSuccess)) *capnp.Message { +func mkRpcRespError(seqno uint64, rpcRespErr error) (*capnp.Message, func()) { + return mkRpcRespErrorNoFunc(seqno, rpcRespErr), nil +} + +func mkRpcRespSuccessNoFunc(seqno uint64, f 
func(*ipc.Libp2pHelperInterface_RpcResponseSuccess)) *capnp.Message { return mkMsg(func(seg *capnp.Segment) { m, err := ipc.NewRootDaemonInterface_Message(seg) panicOnErr(err) @@ -248,6 +257,10 @@ func mkRpcRespSuccess(seqno uint64, f func(*ipc.Libp2pHelperInterface_RpcRespons }) } +func mkRpcRespSuccess(seqno uint64, f func(*ipc.Libp2pHelperInterface_RpcResponseSuccess)) (*capnp.Message, func()) { + return mkRpcRespSuccessNoFunc(seqno, f), nil +} + func mkPushMsg(f func(ipc.DaemonInterface_PushMessage)) *capnp.Message { return mkMsg(func(seg *capnp.Segment) { m, err := ipc.NewRootDaemonInterface_Message(seg) diff --git a/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go b/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go index 74daed42656f..047a728c394b 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go @@ -22,7 +22,7 @@ func fromAddPeerReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.AddPeer() return AddPeerReq(i), err } -func (m AddPeerReq) handle(app *app, seqno uint64) *capnp.Message { +func (m AddPeerReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -71,7 +71,7 @@ func fromGetPeerNodeStatusReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.GetPeerNodeStatus() return GetPeerNodeStatusReq(i), err } -func (m GetPeerNodeStatusReq) handle(app *app, seqno uint64) *capnp.Message { +func (m GetPeerNodeStatusReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { ctx, cancel := context.WithTimeout(app.Ctx, codanet.NodeStatusTimeout) defer cancel() pma, err := GetPeerNodeStatusReqT(m).Peer() @@ -147,7 +147,7 @@ func fromListPeersReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.ListPeers() return ListPeersReq(i), err } -func (msg ListPeersReq) handle(app *app, seqno uint64) *capnp.Message { +func (msg ListPeersReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if 
app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/peer_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/peer_msg_test.go index b1bde309b0fb..5cddac5b95f6 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/peer_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/peer_msg_test.go @@ -28,7 +28,7 @@ func testAddPeerImplDo(t *testing.T, node *app, peerAddr peer.AddrInfo, isSeed b m.SetIsSeed(isSeed) var mRpcSeqno uint64 = 2000 - resMsg := AddPeerReq(m).handle(node, mRpcSeqno) + resMsg, _ := AddPeerReq(m).handle(node, mRpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "addPeer") require.Equal(t, seqno, mRpcSeqno) require.True(t, respSuccess.HasAddPeer()) @@ -88,7 +88,7 @@ func TestGetPeerNodeStatus(t *testing.T) { require.NoError(t, ma.SetRepresentation(addr)) var mRpcSeqno uint64 = 18900 - resMsg := GetPeerNodeStatusReq(m).handle(appB, mRpcSeqno) + resMsg, _ := GetPeerNodeStatusReq(m).handle(appB, mRpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "getPeerNodeStatus") require.Equal(t, seqno, mRpcSeqno) require.True(t, respSuccess.HasGetPeerNodeStatus()) @@ -108,7 +108,7 @@ func TestListPeers(t *testing.T) { require.NoError(t, err) var mRpcSeqno uint64 = 2002 - resMsg := ListPeersReq(m).handle(appB, mRpcSeqno) + resMsg, _ := ListPeersReq(m).handle(appB, mRpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "listPeers") require.Equal(t, seqno, mRpcSeqno) require.True(t, respSuccess.HasListPeers()) diff --git a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go index 8ef176ae03e0..885c199a1d42 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go @@ -66,7 +66,7 @@ func fromPublishReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.Publish() return PublishReq(i), err } -func (m PublishReq) handle(app *app, 
seqno uint64) *capnp.Message { +func (m PublishReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -111,7 +111,7 @@ func fromSubscribeReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.Subscribe() return SubscribeReq(i), err } -func (m SubscribeReq) handle(app *app, seqno uint64) *capnp.Message { +func (m SubscribeReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -244,7 +244,7 @@ func fromUnsubscribeReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.Unsubscribe() return UnsubscribeReq(i), err } -func (m UnsubscribeReq) handle(app *app, seqno uint64) *capnp.Message { +func (m UnsubscribeReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go index 6d8ae65579f3..403ac82303e1 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go @@ -19,7 +19,7 @@ func testPublishDo(t *testing.T, app *app, topic string, data []byte, rpcSeqno u require.NoError(t, m.SetTopic(topic)) require.NoError(t, m.SetData(data)) - resMsg := PublishReq(m).handle(app, rpcSeqno) + resMsg, _ := PublishReq(m).handle(app, rpcSeqno) require.NoError(t, err) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "publish") require.Equal(t, seqno, rpcSeqno) @@ -47,7 +47,7 @@ func testSubscribeDo(t *testing.T, app *app, topic string, subId uint64, rpcSeqn require.NoError(t, err) sid.SetId(subId) - resMsg := SubscribeReq(m).handle(app, rpcSeqno) + resMsg, _ := SubscribeReq(m).handle(app, rpcSeqno) require.NoError(t, err) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "subscribe") require.Equal(t, seqno, rpcSeqno) @@ -89,7 +89,7 @@ 
func TestUnsubscribe(t *testing.T) { require.NoError(t, err) sid.SetId(idx) - resMsg := UnsubscribeReq(m).handle(testApp, 7739) + resMsg, _ := UnsubscribeReq(m).handle(testApp, 7739) require.NoError(t, err) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "unsubscribe") require.Equal(t, seqno, uint64(7739)) diff --git a/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go b/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go index c25456db3c73..a2560769960f 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go @@ -19,7 +19,7 @@ func fromAddStreamHandlerReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.AddStreamHandler() return AddStreamHandlerReq(i), err } -func (m AddStreamHandlerReq) handle(app *app, seqno uint64) *capnp.Message { +func (m AddStreamHandlerReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -51,7 +51,7 @@ func fromCloseStreamReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.CloseStream() return CloseStreamReq(i), err } -func (m CloseStreamReq) handle(app *app, seqno uint64) *capnp.Message { +func (m CloseStreamReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -77,7 +77,7 @@ func fromOpenStreamReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.OpenStream() return OpenStreamReq(i), err } -func (m OpenStreamReq) handle(app *app, seqno uint64) *capnp.Message { +func (m OpenStreamReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -122,14 +122,7 @@ func (m OpenStreamReq) handle(app *app, seqno uint64) *capnp.Message { } streamIdx := app.AddStream(stream) - go func() { - // FIXME HACK: allow time for the openStreamResult to get printed before we start inserting stream events - time.Sleep(250 * 
time.Millisecond) - // Note: It is _very_ important that we call handleStreamReads here -- this is how the "caller" side of the stream starts listening to the responses from the RPCs. Do not remove. - handleStreamReads(app, stream, streamIdx) - }() - - return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { + mkResponse := func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { resp, err := m.NewOpenStream() panicOnErr(err) sid, err := resp.NewStreamId() @@ -138,7 +131,10 @@ func (m OpenStreamReq) handle(app *app, seqno uint64) *capnp.Message { pi, err := resp.NewPeer() panicOnErr(err) setPeerInfo(pi, peer) - }) + } + return mkRpcRespSuccessNoFunc(seqno, mkResponse), func() { + handleStreamReads(app, stream, streamIdx) + } } type RemoveStreamHandlerReqT = ipc.Libp2pHelperInterface_RemoveStreamHandler_Request @@ -148,7 +144,7 @@ func fromRemoveStreamHandlerReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.RemoveStreamHandler() return RemoveStreamHandlerReq(i), err } -func (m RemoveStreamHandlerReq) handle(app *app, seqno uint64) *capnp.Message { +func (m RemoveStreamHandlerReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -171,7 +167,7 @@ func fromResetStreamReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.ResetStream() return ResetStreamReq(i), err } -func (m ResetStreamReq) handle(app *app, seqno uint64) *capnp.Message { +func (m ResetStreamReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } @@ -197,7 +193,7 @@ func fromSendStreamReq(req ipcRpcRequest) (rpcRequest, error) { i, err := req.SendStream() return SendStreamReq(i), err } -func (m SendStreamReq) handle(app *app, seqno uint64) *capnp.Message { +func (m SendStreamReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { if app.P2p == nil { return mkRpcRespError(seqno, needsConfigure()) } 
diff --git a/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go index d3621a2f7881..9de883a8a679 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go @@ -19,7 +19,7 @@ func testAddStreamHandlerDo(t *testing.T, protocol string, app *app, rpcSeqno ui require.NoError(t, err) require.NoError(t, m.SetProtocol(protocol)) - resMsg := AddStreamHandlerReq(m).handle(app, rpcSeqno) + resMsg, _ := AddStreamHandlerReq(m).handle(app, rpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "addStreamHandler") require.Equal(t, seqno, rpcSeqno) require.True(t, respSuccess.HasAddStreamHandler()) @@ -58,7 +58,7 @@ func testOpenStreamDo(t *testing.T, appA *app, appBHost host.Host, appBPort uint require.NoError(t, pid.SetId(appBHost.ID().String())) require.NoError(t, err) - resMsg := OpenStreamReq(m).handle(appA, rpcSeqno) + resMsg, _ := OpenStreamReq(m).handle(appA, rpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "openStream") require.Equal(t, seqno, rpcSeqno) require.True(t, respSuccess.HasOpenStream()) @@ -103,7 +103,7 @@ func testCloseStreamDo(t *testing.T, app *app, streamId uint64, rpcSeqno uint64) require.NoError(t, err) sid.SetId(streamId) - resMsg := CloseStreamReq(m).handle(app, rpcSeqno) + resMsg, _ := CloseStreamReq(m).handle(app, rpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "closeStream") require.Equal(t, seqno, rpcSeqno) require.True(t, respSuccess.HasCloseStream()) @@ -134,7 +134,7 @@ func TestRemoveStreamHandler(t *testing.T) { require.NoError(t, err) require.NoError(t, rsh.SetProtocol(newProtocol)) var rshRpcSeqno uint64 = 1023 - resMsg := RemoveStreamHandlerReq(rsh).handle(appB, rshRpcSeqno) + resMsg, _ := RemoveStreamHandlerReq(rsh).handle(appB, rshRpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "removeStreamHandler") require.Equal(t, seqno, 
rshRpcSeqno) require.True(t, respSuccess.HasRemoveStreamHandler()) @@ -151,7 +151,7 @@ func TestRemoveStreamHandler(t *testing.T) { require.NoError(t, err) var osRpcSeqno uint64 = 1026 - osResMsg := OpenStreamReq(os).handle(appA, osRpcSeqno) + osResMsg, _ := OpenStreamReq(os).handle(appA, osRpcSeqno) osRpcSeqno_, errMsg := checkRpcResponseError(t, osResMsg) require.Equal(t, osRpcSeqno, osRpcSeqno_) require.Equal(t, "libp2p error: protocols not supported: [/mina/99]", errMsg) @@ -166,7 +166,7 @@ func testResetStreamDo(t *testing.T, app *app, streamId uint64, rpcSeqno uint64) require.NoError(t, err) sid.SetId(streamId) - resMsg := ResetStreamReq(m).handle(app, rpcSeqno) + resMsg, _ := ResetStreamReq(m).handle(app, rpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "resetStream") require.Equal(t, seqno, rpcSeqno) require.True(t, respSuccess.HasResetStream()) @@ -194,7 +194,7 @@ func testSendStreamDo(t *testing.T, app *app, streamId uint64, msgBytes []byte, sid.SetId(streamId) require.NoError(t, msg.SetData(msgBytes)) - resMsg := SendStreamReq(m).handle(app, rpcSeqno) + resMsg, _ := SendStreamReq(m).handle(app, rpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "sendStream") require.Equal(t, seqno, rpcSeqno) require.True(t, respSuccess.HasSendStream()) @@ -260,7 +260,7 @@ func TestOpenStreamBeforeAndAfterSetGatingConfig(t *testing.T) { gc.SetIsolate(false) var mRpcSeqno uint64 = 2003 - resMsg := SetGatingConfigReq(m).handle(appB, mRpcSeqno) + resMsg, _ := SetGatingConfigReq(m).handle(appB, mRpcSeqno) seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "setGatingConfig") require.Equal(t, seqno, mRpcSeqno) require.True(t, respSuccess.HasSetGatingConfig()) @@ -291,7 +291,7 @@ func TestOpenStreamBeforeAndAfterSetGatingConfig(t *testing.T) { require.NoError(t, pid.SetId(appB.P2p.Host.ID().String())) require.NoError(t, err) - resMsg := OpenStreamReq(m).handle(appA, 9905) + resMsg, _ := OpenStreamReq(m).handle(appA, 9905) seqno, _ := 
checkRpcResponseError(t, resMsg) require.Equal(t, uint64(9905), seqno) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/util_test.go b/src/app/libp2p_helper/src/libp2p_helper/util_test.go index 0ea55f729a02..fe7b0a8970fa 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/util_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/util_test.go @@ -195,7 +195,8 @@ func beginAdvertisingSendAndCheckDo(app *app, rpcSeqno uint64) (*capnp.Message, if err != nil { return nil, err } - return BeginAdvertisingReq(m).handle(app, rpcSeqno), nil + r, _ := BeginAdvertisingReq(m).handle(app, rpcSeqno) + return r, nil } func checkBeginAdvertisingResponse(t *testing.T, rpcSeqno uint64, resMsg *capnp.Message) { From 9e8efd444fd20bd488da32ec0f5a2ad30c02510b Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 16 Nov 2023 13:04:10 +0100 Subject: [PATCH 019/119] Remove unused seeds field of Helper --- src/app/libp2p_helper/src/codanet.go | 10 ---------- src/app/libp2p_helper/src/libp2p_helper/peer_msg.go | 5 ----- 2 files changed, 15 deletions(-) diff --git a/src/app/libp2p_helper/src/codanet.go b/src/app/libp2p_helper/src/codanet.go index 113cecec954a..df4c732d47da 100644 --- a/src/app/libp2p_helper/src/codanet.go +++ b/src/app/libp2p_helper/src/codanet.go @@ -243,8 +243,6 @@ type Helper struct { ConnectionManager *CodaConnectionManager BandwidthCounter *metrics.BandwidthCounter MsgStats *MessageStats - _seeds []peer.AddrInfo - seedsMutex sync.RWMutex NodeStatus []byte HeartbeatPeer func(peer.ID) } @@ -396,13 +394,6 @@ func (h *Helper) SetGatingState(gs *CodaGatingConfig) { } } -func (h *Helper) AddSeeds(infos ...peer.AddrInfo) { - // TODO: this "_seeds" field is never read anywhere, is it needed? - h.seedsMutex.Lock() - h._seeds = append(h._seeds, infos...) 
- h.seedsMutex.Unlock() -} - func (gs *CodaGatingState) TrustPeer(p peer.ID) { gs.trustedPeersMutex.Lock() defer gs.trustedPeersMutex.Unlock() @@ -786,7 +777,6 @@ func MakeHelper(ctx context.Context, listenOn []ma.Multiaddr, externalAddr ma.Mu ConnectionManager: connManager, BandwidthCounter: bandwidthCounter, MsgStats: &MessageStats{min: math.MaxUint64}, - _seeds: seeds, HeartbeatPeer: func(p peer.ID) { lanPatcher.Heartbeat(p) wanPatcher.Heartbeat(p) diff --git a/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go b/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go index 047a728c394b..63b08f587486 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/peer_msg.go @@ -48,11 +48,6 @@ func (m AddPeerReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { } app.P2p.Logger.Info("addPeer Trying to connect to: ", info) - - if AddPeerReqT(m).IsSeed() { - app.P2p.AddSeeds(*info) - } - err = app.P2p.Host.Connect(app.Ctx, *info) if err != nil { return mkRpcRespError(seqno, badp2p(err)) From 554285f99ce6a974874d24168507f788c1024363 Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 16 Nov 2023 13:26:49 +0100 Subject: [PATCH 020/119] fixup! Protect shared data accesses --- .../libp2p_helper/src/libp2p_helper/app.go | 67 +++++-------------- .../src/libp2p_helper/pubsub_msg.go | 32 ++++----- .../src/libp2p_helper/stream_msg.go | 19 +++++- 3 files changed, 48 insertions(+), 70 deletions(-) diff --git a/src/app/libp2p_helper/src/libp2p_helper/app.go b/src/app/libp2p_helper/src/libp2p_helper/app.go index 369cec9dc204..b6b51a6c5965 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/app.go +++ b/src/app/libp2p_helper/src/libp2p_helper/app.go @@ -68,12 +68,13 @@ func (app *app) AddPeers(infos ...peer.AddrInfo) { app._addedPeers = append(app._addedPeers, infos...) } +// GetAddedPeers returns list of peers +// +// Elements of returned slice should never be modified! 
func (app *app) GetAddedPeers() []peer.AddrInfo { app.addedPeersMutex.RLock() defer app.addedPeersMutex.RUnlock() - copyOfAddedPeers := make([]peer.AddrInfo, len(app._addedPeers)) - copy(copyOfAddedPeers, app._addedPeers) - return copyOfAddedPeers + return app._addedPeers } func (app *app) ResetAddedPeers() { @@ -90,32 +91,12 @@ func (app *app) AddStream(stream net.Stream) uint64 { return streamIdx } -func (app *app) CloseStream(streamId uint64) error { +func (app *app) RemoveStream(streamId uint64) (net.Stream, bool) { app.streamsMutex.Lock() defer app.streamsMutex.Unlock() - if stream, ok := app._streams[streamId]; ok { - delete(app._streams, streamId) - err := stream.Close() - if err != nil { - return badp2p(err) - } - return nil - } - return badRPC(errors.New("unknown stream_idx")) -} - -func (app *app) ResetStream(streamId uint64) error { - app.streamsMutex.Lock() - defer app.streamsMutex.Unlock() - if stream, ok := app._streams[streamId]; ok { - delete(app._streams, streamId) - err := stream.Reset() - if err != nil { - return badp2p(err) - } - return nil - } - return badRPC(errors.New("unknown stream_idx")) + stream, ok := app._streams[streamId] + delete(app._streams, streamId) + return stream, ok } func (app *app) StreamWrite(streamId uint64, data []byte) error { @@ -149,12 +130,6 @@ func (app *app) AddValidator() (uint64, chan pubsub.ValidationResult) { return seqno, ch } -func (app *app) RemoveValidator(seqno uint64) { - app.validatorMutex.Lock() - defer app.validatorMutex.Unlock() - delete(app._validators, seqno) -} - func (app *app) TimeoutValidator(seqno uint64) { now := time.Now() app.validatorMutex.Lock() @@ -162,16 +137,12 @@ func (app *app) TimeoutValidator(seqno uint64) { app._validators[seqno].TimedOutAt = &now } -func (app *app) FinishValidator(seqno uint64, finish func(st *validationStatus)) bool { +func (app *app) RemoveValidator(seqno uint64) (*validationStatus, bool) { app.validatorMutex.Lock() defer app.validatorMutex.Unlock() - if st, ok := 
app._validators[seqno]; ok { - finish(st) - delete(app._validators, seqno) - return true - } else { - return false - } + st, ok := app._validators[seqno] + delete(app._validators, seqno) + return st, ok } func (app *app) AddTopic(topicName string, topic *pubsub.Topic) { @@ -193,18 +164,12 @@ func (app *app) AddSubscription(subId uint64, sub subscription) { app._subs[subId] = sub } -func (app *app) CancelSubscription(subId uint64) bool { +func (app *app) RemoveSubscription(subId uint64) (subscription, bool) { app.subsMutex.Lock() defer app.subsMutex.Unlock() - - if sub, ok := app._subs[subId]; ok { - sub.Sub.Cancel() - sub.Cancel() - delete(app._subs, subId) - return true - } - - return false + sub, ok := app._subs[subId] + delete(app._subs, subId) + return sub, ok } func parseMultiaddrWithID(ma multiaddr.Multiaddr, id peer.ID) (*codaPeerInfo, error) { diff --git a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go index 885c199a1d42..12a167be5e13 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg.go @@ -35,26 +35,24 @@ func (m ValidationPush) handle(app *app) { app.P2p.Logger.Errorf("handleValidation: error %s", err) return } + res := ValidationUnknown + switch ValidationPushT(m).Result() { + case ipc.ValidationResult_accept: + res = pubsub.ValidationAccept + case ipc.ValidationResult_reject: + res = pubsub.ValidationReject + case ipc.ValidationResult_ignore: + res = pubsub.ValidationIgnore + default: + app.P2p.Logger.Warnf("handleValidation: unknown validation result %d", ValidationPushT(m).Result()) + } seqno := vid.Id() - found := app.FinishValidator(seqno, func(st *validationStatus) { - res := ValidationUnknown - switch ValidationPushT(m).Result() { - case ipc.ValidationResult_accept: - res = pubsub.ValidationAccept - case ipc.ValidationResult_reject: - res = pubsub.ValidationReject - case ipc.ValidationResult_ignore: - res = 
pubsub.ValidationIgnore - default: - app.P2p.Logger.Warnf("handleValidation: unknown validation result %d", ValidationPushT(m).Result()) - } + if st, found := app.RemoveValidator(seqno); found { st.Completion <- res if st.TimedOutAt != nil { app.P2p.Logger.Errorf("validation for item %d took %d seconds", seqno, time.Now().Add(validationTimeout).Sub(*st.TimedOutAt)) } - }) - - if !found { + } else { app.P2p.Logger.Warnf("handleValidation: validation seqno %d unknown", seqno) } } @@ -253,7 +251,9 @@ func (m UnsubscribeReq) handle(app *app, seqno uint64) (*capnp.Message, func()) return mkRpcRespError(seqno, badRPC(err)) } subId := subId_.Id() - if app.CancelSubscription(subId) { + if sub, found := app.RemoveSubscription(subId); found { + sub.Sub.Cancel() + sub.Cancel() return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { _, err := m.NewUnsubscribe() panicOnErr(err) diff --git a/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go b/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go index a2560769960f..12e7c7b9770a 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go @@ -7,6 +7,7 @@ import ( ipc "libp2p_ipc" capnp "capnproto.org/go/capnp/v3" + "github.com/go-errors/errors" net "github.com/libp2p/go-libp2p/core/network" peer "github.com/libp2p/go-libp2p/core/peer" protocol "github.com/libp2p/go-libp2p/core/protocol" @@ -34,8 +35,8 @@ func (m AddStreamHandlerReq) handle(app *app, seqno uint64) (*capnp.Message, fun return } streamIdx := app.AddStream(stream) - handleStreamReads(app, stream, streamIdx) app.writeMsg(mkIncomingStreamUpcall(peerinfo, streamIdx, protocolId)) + handleStreamReads(app, stream, streamIdx) }) return mkRpcRespSuccess(seqno, func(m *ipc.Libp2pHelperInterface_RpcResponseSuccess) { @@ -60,7 +61,13 @@ func (m CloseStreamReq) handle(app *app, seqno uint64) (*capnp.Message, func()) return mkRpcRespError(seqno, badRPC(err)) } streamId := sid.Id() 
- err = app.CloseStream(streamId) + if stream, found := app.RemoveStream(streamId); found { + if err2 := stream.Close(); err2 != nil { + err = badp2p(err2) + } + } else { + err = badRPC(errors.New("unknown stream_idx")) + } if err != nil { return mkRpcRespError(seqno, err) } @@ -176,7 +183,7 @@ func (m ResetStreamReq) handle(app *app, seqno uint64) (*capnp.Message, func()) return mkRpcRespError(seqno, badRPC(err)) } streamId := sid.Id() - err = app.ResetStream(streamId) + if stream, found := app.RemoveStream(streamId); found { + if err2 := stream.Reset(); err2 != nil { + err = badp2p(err2) + } + } else { + err = badRPC(errors.New("unknown stream_idx")) + } if err != nil { return mkRpcRespError(seqno, err) } From aafbb9c69dc7f1e9f8d8f9b678cd5cdd4e4f7feb Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 16 Nov 2023 13:55:06 +0100 Subject: [PATCH 021/119] Fine-grained locking for stream writes Problem: writing to a stream pauses all other stream writes. This might be troublesome e.g. in case of a peer disconnecting: until the connection timeout is propagated, we may get stuck trying to send it bytes, while writing to streams of other peers is blocked. Solution: use fine-grained locking on stream level. P.S. previous commit introduces a potential concurrency issue in stream reset/write not being synchronized. This commit fixes this issue.
--- .../libp2p_helper/src/libp2p_helper/app.go | 41 +++++++++++-------- .../libp2p_helper/src/libp2p_helper/data.go | 22 +++++++++- .../src/libp2p_helper/stream_msg.go | 2 +- .../src/libp2p_helper/util_test.go | 2 +- 4 files changed, 47 insertions(+), 20 deletions(-) diff --git a/src/app/libp2p_helper/src/libp2p_helper/app.go b/src/app/libp2p_helper/src/libp2p_helper/app.go index b6b51a6c5965..a740ba37d528 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/app.go +++ b/src/app/libp2p_helper/src/libp2p_helper/app.go @@ -31,7 +31,7 @@ func newApp() *app { _subs: make(map[uint64]subscription), _topics: make(map[string]*pubsub.Topic), _validators: make(map[uint64]*validationStatus), - _streams: make(map[uint64]net.Stream), + _streams: make(map[uint64]*stream), OutChan: outChan, Out: bufio.NewWriter(os.Stdout), _addedPeers: []peer.AddrInfo{}, @@ -83,15 +83,15 @@ func (app *app) ResetAddedPeers() { app._addedPeers = nil } -func (app *app) AddStream(stream net.Stream) uint64 { +func (app *app) AddStream(stream_ net.Stream) uint64 { streamIdx := app.NextId() app.streamsMutex.Lock() defer app.streamsMutex.Unlock() - app._streams[streamIdx] = stream + app._streams[streamIdx] = &stream{stream: stream_} return streamIdx } -func (app *app) RemoveStream(streamId uint64) (net.Stream, bool) { +func (app *app) RemoveStream(streamId uint64) (*stream, bool) { app.streamsMutex.Lock() defer app.streamsMutex.Unlock() stream, ok := app._streams[streamId] @@ -99,19 +99,28 @@ func (app *app) RemoveStream(streamId uint64) (net.Stream, bool) { return stream, ok } -func (app *app) StreamWrite(streamId uint64, data []byte) error { - // TODO Consider using a more fine-grained locking strategy, - // not using a global mutex to lock on a message sending - app.streamsMutex.Lock() - defer app.streamsMutex.Unlock() - if stream, ok := app._streams[streamId]; ok { - n, err := stream.Write(data) - if err != nil { +func (app *app) getStream(streamId uint64) (*stream, bool) { + app.streamsMutex.RLock() 
+ defer app.streamsMutex.RUnlock() + s, has := app._streams[streamId] + return s, has +} + +func (app *app) WriteStream(streamId uint64, data []byte) error { + if stream, ok := app.getStream(streamId); ok { + stream.mutex.Lock() + defer stream.mutex.Unlock() + + if n, err := stream.stream.Write(data); err != nil { // TODO check that it's correct to error out, not repeat writing - delete(app._streams, streamId) - close_err := stream.Close() - if close_err != nil { - app.P2p.Logger.Errorf("failed to close stream %d after encountering write failure (%s): %s", streamId, err.Error(), close_err.Error()) + _, has := app.RemoveStream(streamId) + if has { + // If stream is no longer in the *app, it means it is closed or soon to be closed by + // another goroutine + close_err := stream.stream.Close() + if close_err != nil { + app.P2p.Logger.Errorf("failed to close stream %d after encountering write failure (%s): %s", streamId, err.Error(), close_err.Error()) + } } return wrapError(badp2p(err), fmt.Sprintf("only wrote %d out of %d bytes", n, len(data))) } diff --git a/src/app/libp2p_helper/src/libp2p_helper/data.go b/src/app/libp2p_helper/src/libp2p_helper/data.go index bf1748aaf493..482242a699af 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/data.go +++ b/src/app/libp2p_helper/src/libp2p_helper/data.go @@ -20,6 +20,24 @@ import ( peer "github.com/libp2p/go-libp2p/core/peer" ) +// Stream with mutex +type stream struct { + mutex sync.Mutex + stream net.Stream +} + +func (s *stream) Reset() error { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.stream.Reset() +} + +func (s *stream) Close() error { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.stream.Close() +} + type app struct { P2p *codanet.Helper Ctx context.Context @@ -29,8 +47,8 @@ type app struct { topicsMutex sync.RWMutex _validators map[uint64]*validationStatus validatorMutex sync.Mutex - _streams map[uint64]net.Stream - streamsMutex sync.Mutex + _streams map[uint64]*stream + streamsMutex sync.RWMutex 
Out *bufio.Writer OutChan chan *capnp.Message Bootstrapper io.Closer diff --git a/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go b/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go index 12e7c7b9770a..13b2136ea533 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go +++ b/src/app/libp2p_helper/src/libp2p_helper/stream_msg.go @@ -224,7 +224,7 @@ func (m SendStreamReq) handle(app *app, seqno uint64) (*capnp.Message, func()) { } streamId := sid.Id() - err = app.StreamWrite(streamId, data) + err = app.WriteStream(streamId, data) if err != nil { return mkRpcRespError(seqno, err) diff --git a/src/app/libp2p_helper/src/libp2p_helper/util_test.go b/src/app/libp2p_helper/src/libp2p_helper/util_test.go index fe7b0a8970fa..6047e490b0c2 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/util_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/util_test.go @@ -92,7 +92,7 @@ func newTestAppWithMaxConnsAndCtxAndGrace(t *testing.T, privkey crypto.PrivKey, _subs: make(map[uint64]subscription), _topics: make(map[string]*pubsub.Topic), _validators: make(map[uint64]*validationStatus), - _streams: make(map[uint64]net.Stream), + _streams: make(map[uint64]*stream), _addedPeers: make([]peer.AddrInfo, 0, 512), OutChan: outChan, MetricsRefreshTime: time.Second * 2, From 7abacae6e24e256a5deaea244d3eef7f1cb3f38c Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 16 Nov 2023 16:58:07 +0100 Subject: [PATCH 022/119] fixup! 
Fix go read/write races --- .../src/libp2p_helper/pubsub_msg_test.go | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go index 403ac82303e1..db4f5b5a67e3 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/pubsub_msg_test.go @@ -1,6 +1,7 @@ package main import ( + "math/rand" "testing" "github.com/stretchr/testify/require" @@ -104,22 +105,16 @@ func TestUnsubscribe(t *testing.T) { func TestValidationPush(t *testing.T) { testApp, _ := newTestApp(t, nil, true) - ipcValResults := []ipc.ValidationResult{ - ipc.ValidationResult_accept, - ipc.ValidationResult_reject, - ipc.ValidationResult_ignore, + ipc2Pubsub := map[ipc.ValidationResult]pubsub.ValidationResult{ + ipc.ValidationResult_accept: pubsub.ValidationAccept, + ipc.ValidationResult_reject: pubsub.ValidationReject, + ipc.ValidationResult_ignore: pubsub.ValidationIgnore, } - pubsubValResults := []pubsub.ValidationResult{ - pubsub.ValidationAccept, - pubsub.ValidationReject, - pubsub.ValidationIgnore, - } - - for i := 0; i < len(ipcValResults); i++ { - seqno := uint64(i) + for resIpc, resPS := range ipc2Pubsub { + seqno := rand.Uint64() status := &validationStatus{ - Completion: make(chan pubsub.ValidationResult), + Completion: make(chan pubsub.ValidationResult, 1), } testApp._validators[seqno] = status _, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) @@ -128,11 +123,11 @@ func TestValidationPush(t *testing.T) { require.NoError(t, err) validationId, err := m.NewValidationId() validationId.SetId(seqno) - m.SetResult(ipcValResults[i]) + m.SetResult(resIpc) ValidationPush(m).handle(testApp) require.NoError(t, err) result := <-status.Completion - require.Equal(t, pubsubValResults[i], result) + require.Equal(t, resPS, result) _, has := testApp._validators[seqno] require.False(t, has) } From 
8548686e3d017e95e4f5129262ee49524b46c0df Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 16 Nov 2023 20:21:02 +0100 Subject: [PATCH 023/119] Fix libp2p unit tests --- .../libp2p_helper/src/libp2p_helper/app.go | 10 ++-- .../src/libp2p_helper/main_test.go | 18 ++++--- .../src/libp2p_helper/message_id_test.go | 10 +++- .../src/libp2p_helper/stream_msg_test.go | 54 ++++++++++++++++--- 4 files changed, 73 insertions(+), 19 deletions(-) diff --git a/src/app/libp2p_helper/src/libp2p_helper/app.go b/src/app/libp2p_helper/src/libp2p_helper/app.go index a740ba37d528..2c49af18ea3e 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/app.go +++ b/src/app/libp2p_helper/src/libp2p_helper/app.go @@ -46,11 +46,11 @@ func (app *app) SetConnectionHandlers() { app.setConnectionHandlersOnce.Do(func() { app.P2p.ConnectionManager.AddOnConnectHandler(func(net net.Network, c net.Conn) { app.updateConnectionMetrics() - app.writeMsg(mkPeerConnectedUpcall(peer.Encode(c.RemotePeer()))) + app.writeMsg(mkPeerConnectedUpcall(c.RemotePeer().String())) }) app.P2p.ConnectionManager.AddOnDisconnectHandler(func(net net.Network, c net.Conn) { app.updateConnectionMetrics() - app.writeMsg(mkPeerDisconnectedUpcall(peer.Encode(c.RemotePeer()))) + app.writeMsg(mkPeerDisconnectedUpcall(c.RemotePeer().String())) }) }) } @@ -119,7 +119,7 @@ func (app *app) WriteStream(streamId uint64, data []byte) error { // another goroutine close_err := stream.stream.Close() if close_err != nil { - app.P2p.Logger.Errorf("failed to close stream %d after encountering write failure (%s): %s", streamId, err.Error(), close_err.Error()) + app.P2p.Logger.Debugf("failed to close stream %d after encountering write failure (%s): %s", streamId, err.Error(), close_err.Error()) } } return wrapError(badp2p(err), fmt.Sprintf("only wrote %d out of %d bytes", n, len(data))) @@ -308,13 +308,13 @@ func (app *app) checkPeerCount() { err = prometheus.Register(peerCount) if err != nil { - app.P2p.Logger.Debugf("couldn't register peer_count; 
perhaps we've already done so", err.Error()) + app.P2p.Logger.Debugf("couldn't register peer_count; perhaps we've already done so: %s", err) return } err = prometheus.Register(connectedPeerCount) if err != nil { - app.P2p.Logger.Debugf("couldn't register connected_peer_count; perhaps we've already done so", err.Error()) + app.P2p.Logger.Debugf("couldn't register connected_peer_count; perhaps we've already done so: %s", err) return } diff --git a/src/app/libp2p_helper/src/libp2p_helper/main_test.go b/src/app/libp2p_helper/src/libp2p_helper/main_test.go index 2c8c67bdf4b2..bc135bafeae5 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/main_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/main_test.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "strings" "sync" @@ -23,6 +22,8 @@ import ( net "github.com/libp2p/go-libp2p/core/network" + gonet "net" + ipc "libp2p_ipc" "github.com/stretchr/testify/require" @@ -54,7 +55,7 @@ const ( ) func TestMplex_SendLargeMessage(t *testing.T) { - // assert we are able to send and receive a message with size up to 1 << 30 bytes + // assert we are able to send and receive a message with size up to 1 MiB appA, _ := newTestApp(t, nil, true) appA.NoDHT = true @@ -68,7 +69,7 @@ func TestMplex_SendLargeMessage(t *testing.T) { err = appB.P2p.Host.Connect(appB.Ctx, appAInfos[0]) require.NoError(t, err) - msgSize := uint64(1 << 30) + msgSize := uint64(1 << 20) withTimeoutAsync(t, func(done chan interface{}) { // create handler that reads `msgSize` bytes @@ -274,9 +275,14 @@ func TestLibp2pMetrics(t *testing.T) { appB.P2p.Host.SetStreamHandler(testProtocol, handler) + listener, err := gonet.Listen("tcp", ":0") + if err != nil { + panic(err) + } + port := listener.Addr().(*gonet.TCPAddr).Port server := http.NewServeMux() server.Handle("/metrics", promhttp.Handler()) - go http.ListenAndServe(":9001", server) + go http.Serve(listener, server) go appB.checkPeerCount() go appB.checkMessageStats() @@ -292,11 +298,11 @@ 
func TestLibp2pMetrics(t *testing.T) { expectedPeerCount := len(appB.P2p.Host.Network().Peers()) expectedCurrentConnCount := appB.P2p.ConnectionManager.GetInfo().ConnCount - resp, err := http.Get("http://localhost:9001/metrics") + resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port)) require.NoError(t, err) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) respBody := string(body) diff --git a/src/app/libp2p_helper/src/libp2p_helper/message_id_test.go b/src/app/libp2p_helper/src/libp2p_helper/message_id_test.go index 8677e3527180..215bf0a1bd68 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/message_id_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/message_id_test.go @@ -48,11 +48,18 @@ func testPubsubMsgIdFun(t *testing.T, topic string) { // Subscribe to the topic testSubscribeDo(t, alice, topic, 21, 58) + // Timeouts between subscriptions are needed because otherwise each process would try to discover peers + // and will only find that no other peers are connected to the same topic. 
+ // That said, pubsub's implementation is imperfect + time.Sleep(time.Second) testSubscribeDo(t, bob, topic, 21, 58) + time.Sleep(time.Second) testSubscribeDo(t, carol, topic, 21, 58) + time.Sleep(time.Second) _ = testOpenStreamDo(t, bob, alice.P2p.Host, appAPort, 9900, string(newProtocol)) _ = testOpenStreamDo(t, carol, alice.P2p.Host, appAPort, 9900, string(newProtocol)) + <-trapA.IncomingStream <-trapA.IncomingStream @@ -60,8 +67,7 @@ func testPubsubMsgIdFun(t *testing.T, topic string) { testPublishDo(t, alice, topic, msg, 21) testPublishDo(t, bob, topic, msg, 21) - time.Sleep(time.Millisecond * 100) - + time.Sleep(time.Second) n := 0 loop: for { diff --git a/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go b/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go index 9de883a8a679..8e4cd233a5de 100644 --- a/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go +++ b/src/app/libp2p_helper/src/libp2p_helper/stream_msg_test.go @@ -2,6 +2,7 @@ package main import ( "context" + "math/rand" "testing" "github.com/stretchr/testify/require" @@ -58,7 +59,10 @@ func testOpenStreamDo(t *testing.T, appA *app, appBHost host.Host, appBPort uint require.NoError(t, pid.SetId(appBHost.ID().String())) require.NoError(t, err) - resMsg, _ := OpenStreamReq(m).handle(appA, rpcSeqno) + resMsg, afterWriteHandler := OpenStreamReq(m).handle(appA, rpcSeqno) + if afterWriteHandler != nil { + afterWriteHandler() + } seqno, respSuccess := checkRpcResponseSuccess(t, resMsg, "openStream") require.Equal(t, seqno, rpcSeqno) require.True(t, respSuccess.HasOpenStream()) @@ -151,7 +155,10 @@ func TestRemoveStreamHandler(t *testing.T) { require.NoError(t, err) var osRpcSeqno uint64 = 1026 - osResMsg, _ := OpenStreamReq(os).handle(appA, osRpcSeqno) + osResMsg, afterWriteHandler := OpenStreamReq(os).handle(appA, osRpcSeqno) + if afterWriteHandler != nil { + afterWriteHandler() + } osRpcSeqno_, errMsg := checkRpcResponseError(t, osResMsg) require.Equal(t, osRpcSeqno, osRpcSeqno_) 
require.Equal(t, "libp2p error: protocols not supported: [/mina/99]", errMsg) @@ -182,6 +189,22 @@ func TestResetStream(t *testing.T) { testResetStreamDo(t, appA, streamId, 114558) } +func testSendStreamFailDo(t *testing.T, app *app, streamId uint64, msgBytes []byte, rpcSeqno uint64) { + _, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) + require.NoError(t, err) + m, err := ipc.NewRootLibp2pHelperInterface_SendStream_Request(seg) + require.NoError(t, err) + msg, err := m.NewMsg() + require.NoError(t, err) + sid, err := msg.NewStreamId() + require.NoError(t, err) + sid.SetId(streamId) + require.NoError(t, msg.SetData(msgBytes)) + + resMsg, _ := SendStreamReq(m).handle(app, rpcSeqno) + checkRpcResponseError(t, resMsg) +} + func testSendStreamDo(t *testing.T, app *app, streamId uint64, msgBytes []byte, rpcSeqno uint64) { _, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) require.NoError(t, err) @@ -221,7 +244,7 @@ func TestOpenStreamBeforeAndAfterSetGatingConfig(t *testing.T) { aUpcallErrChan := make(chan error) launchFeedUpcallTrap(appA.P2p.Logger, appA.OutChan, aTrap, aUpcallErrChan, ctx) - appB, appBPort := newTestApp(t, appAInfos, false) + appB, appBPort := newTestApp(t, nil, false) err = appB.P2p.Host.Connect(appB.Ctx, appAInfos[0]) require.NoError(t, err) bTrap := newUpcallTrap("appB", 64, upcallDropAllMask^(1< Date: Tue, 21 Nov 2023 21:25:10 +0100 Subject: [PATCH 024/119] Allow to run CI on forked repositories --- buildkite/scripts/export-git-env-vars.sh | 70 ++++++++++++------- buildkite/scripts/generate-diff.sh | 6 +- buildkite/scripts/handle-fork.sh | 14 ++++ buildkite/scripts/merges-cleanly.sh | 8 ++- .../scripts/run-snark-transaction-profiler.sh | 4 +- buildkite/scripts/version-linter.sh | 6 +- buildkite/src/Command/DockerImage.dhall | 4 +- buildkite/src/Jobs/Test/VersionLint.dhall | 2 +- buildkite/src/Monorepo.dhall | 1 + scripts/release-docker.sh | 6 +- 10 files changed, 81 insertions(+), 40 deletions(-) create mode 100755 
buildkite/scripts/handle-fork.sh diff --git a/buildkite/scripts/export-git-env-vars.sh b/buildkite/scripts/export-git-env-vars.sh index 7481a5b98379..5b45997339d4 100755 --- a/buildkite/scripts/export-git-env-vars.sh +++ b/buildkite/scripts/export-git-env-vars.sh @@ -3,6 +3,8 @@ set -euo pipefail echo "Exporting Variables: " +export MINA_REPO="https://github.com/MinaProtocol/mina.git" + function find_most_recent_numeric_tag() { TAG=$(git describe --always --abbrev=0 $1 | sed 's!/!-!g; s!_!-!g') if [[ $TAG != [0-9]* ]]; then @@ -13,8 +15,7 @@ function find_most_recent_numeric_tag() { export GITHASH=$(git rev-parse --short=7 HEAD) export GITBRANCH=$(git rev-parse --symbolic-full-name --abbrev-ref HEAD | sed 's!/!-!g; s!_!-!g' ) -# GITTAG is the closest tagged commit to this commit, while THIS_COMMIT_TAG only has a value when the current commit is tagged -export GITTAG=$(find_most_recent_numeric_tag HEAD) + export THIS_COMMIT_TAG=$(git tag --points-at HEAD) export PROJECT="mina" @@ -24,8 +25,47 @@ export BUILD_URL=${BUILDKITE_BUILD_URL} set -u export MINA_DEB_CODENAME=${MINA_DEB_CODENAME:=bullseye} - [[ -n "$BUILDKITE_BRANCH" ]] && export GITBRANCH=$(echo "$BUILDKITE_BRANCH" | sed 's!/!-!g; s!_!-!g') + + +if [ "${BUILDKITE_REPO}" != "${MINA_REPO}" ]; then + # We don't want to allow some operations on fork repository which should be done on main repo only. + # Publish to docker hub or publish to unstable debian channel should be exclusive to main repo as it can override + # packages from main repo (by using the same commit and the same branch from forked repository) + + # We don't want to use tags (as this can replace our dockers/debian packages). 
Instead we are using repo name + # For example: for given repo 'https://github.com/dkijania/mina.git' we convert it to 'dkijania_mina' + export GITTAG=1.0.0$(echo ${BUILDKITE_REPO} | sed -e "s/https:\/\/github.com\///g" | sed -e "s/.git//g" | sed -e "s/\//-/g") + export THIS_COMMIT_TAG="" + RELEASE=unstable + +else + # GITTAG is the closest tagged commit to this commit, while THIS_COMMIT_TAG only has a value when the current commit is tagged + export GITTAG=$(find_most_recent_numeric_tag HEAD) + + # Determine deb repo to use + case $GITBRANCH in + berkeley|rampup|compatible|master|release*) # whitelist of branches that can be tagged + case "${THIS_COMMIT_TAG}" in + *alpha*) # any tag including the string `alpha` + RELEASE=alpha ;; + *beta*) # any tag including the string `beta` + RELEASE=beta ;; + *rampup*) # any tag including the string `rampup` + RELEASE=rampup ;; + ?*) # Any other non-empty tag. ? matches a single character and * matches 0 or more characters. + RELEASE=stable ;; + "") # No tag + RELEASE=unstable ;; + *) # The above set of cases should be exhaustive, if they're not then still set RELEASE=unstable + RELEASE=unstable + echo "git tag --points-at HEAD may have failed, falling back to unstable. Value: \"$(git tag --points-at HEAD)\"" + ;; + esac ;; + *) + RELEASE=unstable ;; + esac +fi if [[ -n "${THIS_COMMIT_TAG}" ]]; then # If the commit is tagged export MINA_DEB_VERSION="${GITTAG}-${GITHASH}" @@ -35,30 +75,6 @@ else export MINA_DOCKER_TAG="$(echo "${MINA_DEB_VERSION}-${MINA_DEB_CODENAME}" | sed 's!/!-!g; s!_!-!g')" fi - -# Determine deb repo to use -case $GITBRANCH in - berkeley|rampup|compatible|master|release*) # whitelist of branches that can be tagged - case "${THIS_COMMIT_TAG}" in - *alpha*) # any tag including the string `alpha` - RELEASE=alpha ;; - *beta*) # any tag including the string `beta` - RELEASE=beta ;; - *rampup*) # any tag including the string `rampup` - RELEASE=rampup ;; - ?*) # Any other non-empty tag. ? 
matches a single character and * matches 0 or more characters. - RELEASE=stable ;; - "") # No tag - RELEASE=unstable ;; - *) # The above set of cases should be exhaustive, if they're not then still set RELEASE=unstable - RELEASE=unstable - echo "git tag --points-at HEAD may have failed, falling back to unstable. Value: \"$(git tag --points-at HEAD)\"" - ;; - esac ;; - *) - RELEASE=unstable ;; -esac - # Determine the packages to build (mainnet y/N) case $GITBRANCH in compatible|master|release/1*) # whitelist of branches that are "mainnet-like" diff --git a/buildkite/scripts/generate-diff.sh b/buildkite/scripts/generate-diff.sh index 08d27caa9522..d2e34844b716 100755 --- a/buildkite/scripts/generate-diff.sh +++ b/buildkite/scripts/generate-diff.sh @@ -2,6 +2,8 @@ TAG=$(git tag --points-at HEAD) +source buildkite/scripts/handle-fork.sh + # If this is not a PR build, or the HEAD is tagged, the entire build is dirty if [ -z "${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" ]; then echo "This is not a PR build; considering all files dirty" >&2 @@ -11,7 +13,7 @@ elif [ -n "${TAG}" ]; then git ls-files else COMMIT=$(git log -1 --pretty=format:%H) - BASE_COMMIT=$(git log "origin/${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" -1 --pretty=format:%H) + BASE_COMMIT=$(git log "${REMOTE}/${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" -1 --pretty=format:%H) echo "Diffing current commit: ${COMMIT} against branch: ${BUILDKITE_PULL_REQUEST_BASE_BRANCH} (${BASE_COMMIT})" >&2 - git diff "origin/${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" --name-only + git diff "${REMOTE}/${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" --name-only fi diff --git a/buildkite/scripts/handle-fork.sh b/buildkite/scripts/handle-fork.sh new file mode 100755 index 000000000000..50ea0787463d --- /dev/null +++ b/buildkite/scripts/handle-fork.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +export MINA_REPO="https://github.com/MinaProtocol/mina.git" + +if [ "${BUILDKITE_REPO}" == ${MINA_REPO} ]; then + echo "This is not a Forked repo, skipping..." 
+ export REMOTE="origin" + export FORK=0 +else + git remote add mina ${MINA_REPO} || true + git fetch mina + export REMOTE="mina" + export FORK=1 +fi \ No newline at end of file diff --git a/buildkite/scripts/merges-cleanly.sh b/buildkite/scripts/merges-cleanly.sh index f98234c92c62..8531620275e0 100755 --- a/buildkite/scripts/merges-cleanly.sh +++ b/buildkite/scripts/merges-cleanly.sh @@ -4,21 +4,23 @@ BRANCH=$1 CURRENT=$(git branch --show-current) echo 'Testing for conflicts between the current branch `'"${CURRENT}"'` and `'"${BRANCH}"'`...' + # Adapted from this stackoverflow answer: https://stackoverflow.com/a/10856937 # The git merge-tree command shows the content of a 3-way merge without # touching the index, which we can then search for conflict markers. # Tell git where to find ssl certs git config --global http.sslCAInfo /etc/ssl/certs/ca-bundle.crt -# Fetch a fresh copy of the repo -git fetch origin + +source buildkite/scripts/handle-fork.sh + git config --global user.email "hello@ci.com" git config --global user.name "It's me, CI" # Check mergeability. We use flags so that # * `--no-commit` stops us from updating the index with a merge commit, # * `--no-ff` stops us from updating the index to the HEAD, if the merge is a # straightforward fast-forward -git merge --no-commit --no-ff origin/$BRANCH +git merge --no-commit --no-ff ${REMOTE}/$BRANCH RET=$? 
diff --git a/buildkite/scripts/run-snark-transaction-profiler.sh b/buildkite/scripts/run-snark-transaction-profiler.sh index 31d7bd8777a4..82cb16a3c93a 100755 --- a/buildkite/scripts/run-snark-transaction-profiler.sh +++ b/buildkite/scripts/run-snark-transaction-profiler.sh @@ -29,5 +29,5 @@ K=1 MAX_NUM_UPDATES=4 MIN_NUM_UPDATES=2 -echo "--- Run Snark Transaction Profiler with parameters: --zkapps --k ${K} --max-num-updates ${MAX_NUM_UPDATES} --min-num-updates ${MIN_NUM_UPDATES}" -python3 ./scripts/snark_transaction_profiler.py ${K} ${MAX_NUM_UPDATES} ${MIN_NUM_UPDATES} \ No newline at end of file +echo "-- Run Snark Transaction Profiler with parameters: --zkapps --k ${K} --max-num-updates ${MAX_NUM_UPDATES} --min-num-updates ${MIN_NUM_UPDATES}" +python3 ./scripts/snark_transaction_profiler.py ${K} ${MAX_NUM_UPDATES} ${MIN_NUM_UPDATES} diff --git a/buildkite/scripts/version-linter.sh b/buildkite/scripts/version-linter.sh index 960f4c09d2d3..603ad0bf10e0 100755 --- a/buildkite/scripts/version-linter.sh +++ b/buildkite/scripts/version-linter.sh @@ -15,13 +15,15 @@ apt-get install -y git apt-transport-https ca-certificates tzdata curl python3 p git config --global --add safe.directory /workdir + +source buildkite/scripts/handle-fork.sh source buildkite/scripts/export-git-env-vars.sh pip3 install sexpdata -base_branch=origin/${BUILDKITE_PULL_REQUEST_BASE_BRANCH} +base_branch=${REMOTE}/${BUILDKITE_PULL_REQUEST_BASE_BRANCH} pr_branch=origin/${BUILDKITE_BRANCH} -release_branch=origin/$1 +release_branch=${REMOTE}/$1 echo "--- Run Python version linter with branches: ${pr_branch} ${base_branch} ${release_branch}" ./scripts/version-linter.py ${pr_branch} ${base_branch} ${release_branch} \ No newline at end of file diff --git a/buildkite/src/Command/DockerImage.dhall b/buildkite/src/Command/DockerImage.dhall index 23b5d6b6b5f2..cc1012ea70d4 100644 --- a/buildkite/src/Command/DockerImage.dhall +++ b/buildkite/src/Command/DockerImage.dhall @@ -17,6 +17,7 @@ let ReleaseSpec = { 
service: Text, version: Text, branch: Text, + repo: Text, deb_codename: Text, deb_release: Text, deb_version: Text, @@ -29,6 +30,7 @@ let ReleaseSpec = { version = "\\\${MINA_DOCKER_TAG}", service = "\\\${MINA_SERVICE}", branch = "\\\${BUILDKITE_BRANCH}", + repo = "\\\${BUILDKITE_REPO}", deb_codename = "bullseye", deb_release = "\\\${MINA_DEB_RELEASE}", deb_version = "\\\${MINA_DEB_VERSION}", @@ -43,7 +45,7 @@ let generateStep = \(spec : ReleaseSpec.Type) -> [ Cmd.run ( "export MINA_DEB_CODENAME=${spec.deb_codename} && source ./buildkite/scripts/export-git-env-vars.sh && ./scripts/release-docker.sh " ++ - "--service ${spec.service} --version ${spec.version} --network ${spec.network} --branch ${spec.branch} --deb-codename ${spec.deb_codename} --deb-release ${spec.deb_release} --deb-version ${spec.deb_version} --extra-args \\\"${spec.extra_args}\\\"" + "--service ${spec.service} --version ${spec.version} --network ${spec.network} --branch ${spec.branch} --deb-codename ${spec.deb_codename} --deb-release ${spec.deb_release} --deb-version ${spec.deb_version} --repo ${spec.repo} --extra-args \\\"${spec.extra_args}\\\"" ) ] diff --git a/buildkite/src/Jobs/Test/VersionLint.dhall b/buildkite/src/Jobs/Test/VersionLint.dhall index fcbdc34f21a7..02418541684f 100644 --- a/buildkite/src/Jobs/Test/VersionLint.dhall +++ b/buildkite/src/Jobs/Test/VersionLint.dhall @@ -48,7 +48,7 @@ Pipeline.build let lintDirtyWhen = [ S.strictlyStart (S.contains "src"), S.exactly "buildkite/src/Jobs/Test/VersionLint" "dhall", - S.exactly "buildkite/scripts/version_linter" "sh" + S.exactly "buildkite/scripts/version-linter" "sh" ] in diff --git a/buildkite/src/Monorepo.dhall b/buildkite/src/Monorepo.dhall index 64783a0582fe..408ed1a8d716 100644 --- a/buildkite/src/Monorepo.dhall +++ b/buildkite/src/Monorepo.dhall @@ -28,6 +28,7 @@ let jobs : List JobSpec.Type = let prefixCommands = [ Cmd.run "git config --global http.sslCAInfo /etc/ssl/certs/ca-bundle.crt", -- Tell git where to find certs for https 
connections Cmd.run "git fetch origin", -- Freshen the cache + Cmd.run "./buildkite/scripts/handle-fork.sh", Cmd.run "./buildkite/scripts/generate-diff.sh > _computed_diff.txt" ] diff --git a/scripts/release-docker.sh b/scripts/release-docker.sh index 4407af6656f1..4587129a8a5a 100755 --- a/scripts/release-docker.sh +++ b/scripts/release-docker.sh @@ -21,6 +21,7 @@ function usage() { echo " -v, --version The version to be used in the docker image tag" echo " -n, --network The network configuration to use (devnet or mainnet). Default=devnet" echo " -b, --branch The branch of the mina repository to use for staged docker builds. Default=compatible" + echo " -r, --repo The currently used mina repository" echo " --deb-codename The debian codename (stretch or buster) to build the docker image from. Default=stretch" echo " --deb-release The debian package release channel to pull from (unstable,alpha,beta,stable). Default=unstable" echo " --deb-version The version string for the debian package to install" @@ -37,6 +38,7 @@ while [[ "$#" -gt 0 ]]; do case $1 in -n|--network) NETWORK="--build-arg network=$2"; shift;; -b|--branch) BRANCH="--build-arg MINA_BRANCH=$2"; shift;; -c|--cache-from) CACHE="--cache-from $2"; shift;; + -r|--repo) MINA_REPO="$2"; shift;; --deb-codename) DEB_CODENAME="--build-arg deb_codename=$2"; shift;; --deb-release) DEB_RELEASE="--build-arg deb_release=$2"; shift;; --deb-version) DEB_VERSION="--build-arg deb_version=$2"; shift;; @@ -120,8 +122,8 @@ itn-orchestrator) esac -REPO="--build-arg MINA_REPO=${BUILDKITE_PULL_REQUEST_REPO}" -if [[ -z "${BUILDKITE_PULL_REQUEST_REPO}" ]]; then +REPO="--build-arg MINA_REPO=${MINA_REPO}" +if [[ -z "${MINA_REPO}" ]]; then REPO="--build-arg MINA_REPO=https://github.com/MinaProtocol/mina" fi From 5eb0f916577b9a020d29e77ce0209a8ab4b81408 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 22 Nov 2023 16:15:36 +0100 Subject: [PATCH 025/119] fetch remote instead of origin --- buildkite/scripts/refresh_code.sh | 4 ++++ 
buildkite/src/Monorepo.dhall | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100755 buildkite/scripts/refresh_code.sh diff --git a/buildkite/scripts/refresh_code.sh b/buildkite/scripts/refresh_code.sh new file mode 100755 index 000000000000..4a65fab97093 --- /dev/null +++ b/buildkite/scripts/refresh_code.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +source ./buildkite/scripts/handle-fork.sh +git fetch ${REMOTE} \ No newline at end of file diff --git a/buildkite/src/Monorepo.dhall b/buildkite/src/Monorepo.dhall index 408ed1a8d716..cec4c1eb0dd4 100644 --- a/buildkite/src/Monorepo.dhall +++ b/buildkite/src/Monorepo.dhall @@ -27,8 +27,7 @@ let jobs : List JobSpec.Type = let prefixCommands = [ Cmd.run "git config --global http.sslCAInfo /etc/ssl/certs/ca-bundle.crt", -- Tell git where to find certs for https connections - Cmd.run "git fetch origin", -- Freshen the cache - Cmd.run "./buildkite/scripts/handle-fork.sh", + Cmd.run "./buildkite/scripts/refresh_code.sh", Cmd.run "./buildkite/scripts/generate-diff.sh > _computed_diff.txt" ] From c5c2f77698ef36b311192edd2d086d0a9ba8a5c4 Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Fri, 3 Nov 2023 21:38:42 -0300 Subject: [PATCH 026/119] Optimization: avoid duplicating proofs by reintroducing duplicates in the verifier responses --- src/lib/verifier/prod.ml | 43 +++++++++++++++++++++++++++++----------- 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/src/lib/verifier/prod.ml b/src/lib/verifier/prod.ml index 6f16397a98c3..0ed20add9f44 100644 --- a/src/lib/verifier/prod.ml +++ b/src/lib/verifier/prod.ml @@ -19,7 +19,7 @@ module Worker_state = struct val verify_commands : Mina_base.User_command.Verifiable.t With_status.t list - -> [ `Valid of Mina_base.User_command.Valid.t + -> [ `Valid | `Valid_assuming of ( Pickles.Side_loaded.Verification_key.t * Mina_base.Zkapp_statement.t @@ -94,10 +94,10 @@ module Worker_state = struct Pickles.Side_loaded.verify ~typ:Zkapp_statement.typ to_verify in List.map cs 
~f:(function - | `Valid c -> - `Valid c - | `Valid_assuming (c, xs) -> - if Or_error.is_ok all_verified then `Valid c + | `Valid _ -> + `Valid + | `Valid_assuming (_, xs) -> + if Or_error.is_ok all_verified then `Valid else `Valid_assuming xs | `Invalid_keys keys -> `Invalid_keys keys @@ -173,10 +173,10 @@ module Worker_state = struct let verify_commands cs = List.map cs ~f:(fun c -> match Common.check c with - | `Valid c -> - `Valid c - | `Valid_assuming (c, _) -> - `Valid c + | `Valid _ -> + `Valid + | `Valid_assuming (_, _) -> + `Valid | `Invalid_keys keys -> `Invalid_keys keys | `Invalid_signature keys -> @@ -232,7 +232,7 @@ module Worker = struct ; verify_commands : ( 'w , User_command.Verifiable.t With_status.t list - , [ `Valid of User_command.Valid.t + , [ `Valid | `Valid_assuming of ( Pickles.Side_loaded.Verification_key.t * Mina_base.Zkapp_statement.t @@ -316,7 +316,7 @@ module Worker = struct With_status.Stable.Latest.t list] , [%bin_type_class: - [ `Valid of User_command.Valid.Stable.Latest.t + [ `Valid | `Valid_assuming of ( Pickles.Side_loaded.Verification_key.Stable.Latest.t * Mina_base.Zkapp_statement.Stable.Latest.t @@ -657,13 +657,32 @@ let verify_transaction_snarks = wrap_verify_snarks_with_trace ~checkpoint_before:"Verify_transaction_snarks" ~checkpoint_after:"Verify_transaction_snarks_done" verify_transaction_snarks +(* Injects validated command back into `Validated results *) +let adjust_valid_results ts rs = + List.map2_exn ts rs ~f:(fun c r -> + match r with + | #invalid as invalid -> + invalid + | `Valid_assuming x -> + `Valid_assuming x + | `Valid -> + (* We know that the result matches the input, and that it is valid. 
+ Since the response has been changed to avoid allocating duplicated proofs, + we need to add the command back here *) + let (`If_this_is_used_it_should_have_a_comment_justifying_it c) = + User_command.to_valid_unsafe + (User_command.of_verifiable (With_status.data c)) + in + `Valid c ) + let verify_commands { worker; logger } ts = O1trace.thread "dispatch_user_command_verification" (fun () -> with_retry ~logger (fun () -> let%bind { connection; _ } = Ivar.read !worker in Worker.Connection.run connection ~f:Worker.functions.verify_commands ~arg:ts - |> Deferred.Or_error.map ~f:(fun x -> `Continue x) ) ) + |> Deferred.Or_error.map ~f:(fun rs -> + `Continue (adjust_valid_results ts rs) ) ) ) let verify_commands t ts = let logger = t.logger in From 61e7d343f11847cdede52c2faae56b5d67b96457 Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Tue, 7 Nov 2023 13:10:25 -0300 Subject: [PATCH 027/119] Clarify and comment code --- src/lib/verifier/prod.ml | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/src/lib/verifier/prod.ml b/src/lib/verifier/prod.ml index 0ed20add9f44..d3093cb3e1a0 100644 --- a/src/lib/verifier/prod.ml +++ b/src/lib/verifier/prod.ml @@ -72,6 +72,10 @@ module Worker_state = struct let proof_level = proof_level end) + (* [verify_commands cs] verifies user commands, maintaining their input order in its output. + This ordering is vital as it allows the parent process to inject verified commands back + into the workflow without re-decoding a copy of the command in the result, thus avoiding + proof duplication and reducing memory overhead. 
*) let verify_commands (cs : User_command.Verifiable.t With_status.t list) : _ list Deferred.t = @@ -657,23 +661,28 @@ let verify_transaction_snarks = wrap_verify_snarks_with_trace ~checkpoint_before:"Verify_transaction_snarks" ~checkpoint_after:"Verify_transaction_snarks_done" verify_transaction_snarks -(* Injects validated command back into `Validated results *) -let adjust_valid_results ts rs = - List.map2_exn ts rs ~f:(fun c r -> - match r with +(* Reinjects the original user commands into the validation results, + assuming that the orders of commands and results are synchronized. + This avoids duplicating proof data by not sending it back from the subprocess. *) +let reinject_valid_user_commands_into_valid_results commands results = + List.map2_exn commands results ~f:(fun command result -> + match result with | #invalid as invalid -> - invalid + invalid (* Directly return invalid results *) | `Valid_assuming x -> `Valid_assuming x | `Valid -> - (* We know that the result matches the input, and that it is valid. - Since the response has been changed to avoid allocating duplicated proofs, - we need to add the command back here *) - let (`If_this_is_used_it_should_have_a_comment_justifying_it c) = + (* Since we have stripped the transaction from the result to save memory, + we reconstruct it here knowing the result is valid and the ordering is + maintained. + The use of to_valid_unsafe is justified because a [`Valid] result for this + command means that it has indeed been validated. 
*) + let (`If_this_is_used_it_should_have_a_comment_justifying_it + command_valid ) = User_command.to_valid_unsafe - (User_command.of_verifiable (With_status.data c)) + (User_command.of_verifiable (With_status.data command)) in - `Valid c ) + `Valid command_valid ) let verify_commands { worker; logger } ts = O1trace.thread "dispatch_user_command_verification" (fun () -> @@ -682,7 +691,8 @@ let verify_commands { worker; logger } ts = Worker.Connection.run connection ~f:Worker.functions.verify_commands ~arg:ts |> Deferred.Or_error.map ~f:(fun rs -> - `Continue (adjust_valid_results ts rs) ) ) ) + `Continue + (reinject_valid_user_commands_into_valid_results ts rs) ) ) ) let verify_commands t ts = let logger = t.logger in From b2681e5c1bbbffc9fb8f544d9b3898e5a9b56404 Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Tue, 7 Nov 2023 14:40:01 -0300 Subject: [PATCH 028/119] Use tagged inputs/results to ensure inputs and results ordering will match even if ordering changes --- src/lib/verifier/prod.ml | 203 +++++++++++++++++++++++---------------- 1 file changed, 120 insertions(+), 83 deletions(-) diff --git a/src/lib/verifier/prod.ml b/src/lib/verifier/prod.ml index d3093cb3e1a0..ef33349a2ddf 100644 --- a/src/lib/verifier/prod.ml +++ b/src/lib/verifier/prod.ml @@ -8,6 +8,26 @@ open Blockchain_snark type invalid = Common.invalid [@@deriving bin_io_unversioned, to_yojson] +module With_id_tag = struct + type 'a t = int * 'a [@@deriving bin_io_unversioned] + + let tag_list = List.mapi ~f:(fun id command -> (id, command)) + + (* This function associates each tagged inputs with its corresponding result based + on the ID, and returns a list of tuples (input, result). 
*) + let reassociate_tagged_results tagged_inputs tagged_results = + let result_map = Int.Map.of_alist_exn tagged_results in + List.map tagged_inputs ~f:(fun (id, input) -> + let result = + match Int.Map.find result_map id with + | Some res -> + res + | None -> + failwith "Verification result missing for command" + in + (input, result) ) +end + let invalid_to_error = Common.invalid_to_error type ledger_proof = Ledger_proof.Prod.t @@ -18,7 +38,7 @@ module Worker_state = struct (Protocol_state.Value.t * Proof.t) list -> unit Or_error.t Deferred.t val verify_commands : - Mina_base.User_command.Verifiable.t With_status.t list + Mina_base.User_command.Verifiable.t With_status.t With_id_tag.t list -> [ `Valid | `Valid_assuming of ( Pickles.Side_loaded.Verification_key.t @@ -26,6 +46,7 @@ module Worker_state = struct * Pickles.Side_loaded.Proof.t ) list | invalid ] + With_id_tag.t list Deferred.t @@ -72,49 +93,56 @@ module Worker_state = struct let proof_level = proof_level end) - (* [verify_commands cs] verifies user commands, maintaining their input order in its output. - This ordering is vital as it allows the parent process to inject verified commands back - into the workflow without re-decoding a copy of the command in the result, thus avoiding - proof duplication and reducing memory overhead. 
*) let verify_commands - (cs : User_command.Verifiable.t With_status.t list) : - _ list Deferred.t = - let cs = List.map cs ~f:Common.check in + (cs : + User_command.Verifiable.t With_status.t With_id_tag.t list ) + : _ list Deferred.t = + let results = + List.map cs ~f:(fun (id, c) -> (id, Common.check c)) + in let to_verify = - List.concat_map cs ~f:(function - | `Valid _ -> - [] - | `Valid_assuming (_, xs) -> - xs - | `Invalid_keys _ - | `Invalid_signature _ - | `Invalid_proof _ - | `Missing_verification_key _ - | `Unexpected_verification_key _ - | `Mismatched_authorization_kind _ -> - [] ) + results |> List.map ~f:snd + |> List.concat_map ~f:(function + | `Valid _ -> + [] + | `Valid_assuming (_, xs) -> + xs + | `Invalid_keys _ + | `Invalid_signature _ + | `Invalid_proof _ + | `Missing_verification_key _ + | `Unexpected_verification_key _ + | `Mismatched_authorization_kind _ -> + [] ) in let%map all_verified = Pickles.Side_loaded.verify ~typ:Zkapp_statement.typ to_verify in - List.map cs ~f:(function - | `Valid _ -> - `Valid - | `Valid_assuming (_, xs) -> - if Or_error.is_ok all_verified then `Valid - else `Valid_assuming xs - | `Invalid_keys keys -> - `Invalid_keys keys - | `Invalid_signature keys -> - `Invalid_signature keys - | `Invalid_proof err -> - `Invalid_proof err - | `Missing_verification_key keys -> - `Missing_verification_key keys - | `Unexpected_verification_key keys -> - `Unexpected_verification_key keys - | `Mismatched_authorization_kind keys -> - `Mismatched_authorization_kind keys ) + List.map results ~f:(fun (id, result) -> + let result = + match result with + | `Valid _ -> + (* The command is dropped here to avoid decoding it later in the caller + which would create a duplicate. 
Results are paired back to their inputs + using the input [id]*) + `Valid + | `Valid_assuming (_, xs) -> + if Or_error.is_ok all_verified then `Valid + else `Valid_assuming xs + | `Invalid_keys keys -> + `Invalid_keys keys + | `Invalid_signature keys -> + `Invalid_signature keys + | `Invalid_proof err -> + `Invalid_proof err + | `Missing_verification_key keys -> + `Missing_verification_key keys + | `Unexpected_verification_key keys -> + `Unexpected_verification_key keys + | `Mismatched_authorization_kind keys -> + `Mismatched_authorization_kind keys + in + (id, result) ) let verify_commands cs = Internal_tracing.Context_logger.with_logger (Some logger) @@ -174,25 +202,28 @@ module Worker_state = struct | Check | None -> Deferred.return @@ ( module struct - let verify_commands cs = - List.map cs ~f:(fun c -> - match Common.check c with - | `Valid _ -> - `Valid - | `Valid_assuming (_, _) -> - `Valid - | `Invalid_keys keys -> - `Invalid_keys keys - | `Invalid_signature keys -> - `Invalid_signature keys - | `Invalid_proof err -> - `Invalid_proof err - | `Missing_verification_key keys -> - `Missing_verification_key keys - | `Unexpected_verification_key keys -> - `Unexpected_verification_key keys - | `Mismatched_authorization_kind keys -> - `Mismatched_authorization_kind keys ) + let verify_commands tagged_commands = + List.map tagged_commands ~f:(fun (id, c) -> + let result = + match Common.check c with + | `Valid _ -> + `Valid + | `Valid_assuming (_, _) -> + `Valid + | `Invalid_keys keys -> + `Invalid_keys keys + | `Invalid_signature keys -> + `Invalid_signature keys + | `Invalid_proof err -> + `Invalid_proof err + | `Missing_verification_key keys -> + `Missing_verification_key keys + | `Unexpected_verification_key keys -> + `Unexpected_verification_key keys + | `Mismatched_authorization_kind keys -> + `Mismatched_authorization_kind keys + in + (id, result) ) |> Deferred.return let verify_blockchain_snarks _ = Deferred.return (Ok ()) @@ -235,7 +266,7 @@ module Worker = 
struct ('w, (Transaction_snark.t * Sok_message.t) list, unit Or_error.t) F.t ; verify_commands : ( 'w - , User_command.Verifiable.t With_status.t list + , User_command.Verifiable.t With_status.t With_id_tag.t list , [ `Valid | `Valid_assuming of ( Pickles.Side_loaded.Verification_key.t @@ -243,6 +274,7 @@ module Worker = struct * Pickles.Side_loaded.Proof.t ) list | invalid ] + With_id_tag.t list ) F.t ; get_blockchain_verification_key : @@ -318,6 +350,7 @@ module Worker = struct ( [%bin_type_class: User_command.Verifiable.Stable.Latest.t With_status.Stable.Latest.t + With_id_tag.t list] , [%bin_type_class: [ `Valid @@ -327,6 +360,7 @@ module Worker = struct * Pickles.Side_loaded.Proof.Stable.Latest.t ) list | invalid ] + With_id_tag.t list] , verify_commands ) ; get_blockchain_verification_key = @@ -661,38 +695,41 @@ let verify_transaction_snarks = wrap_verify_snarks_with_trace ~checkpoint_before:"Verify_transaction_snarks" ~checkpoint_after:"Verify_transaction_snarks_done" verify_transaction_snarks -(* Reinjects the original user commands into the validation results, - assuming that the orders of commands and results are synchronized. +(* Reinjects the original user commands into the validation results. This avoids duplicating proof data by not sending it back from the subprocess. *) -let reinject_valid_user_commands_into_valid_results commands results = - List.map2_exn commands results ~f:(fun command result -> - match result with - | #invalid as invalid -> - invalid (* Directly return invalid results *) - | `Valid_assuming x -> - `Valid_assuming x - | `Valid -> - (* Since we have stripped the transaction from the result to save memory, - we reconstruct it here knowing the result is valid and the ordering is - maintained. - The use of to_valid_unsafe is justified because a [`Valid] result for this - command means that it has indeed been validated. 
*) - let (`If_this_is_used_it_should_have_a_comment_justifying_it - command_valid ) = - User_command.to_valid_unsafe - (User_command.of_verifiable (With_status.data command)) - in - `Valid command_valid ) +let reinject_valid_user_command_into_valid_result (command, result) = + match result with + | #invalid as invalid -> + invalid + | `Valid_assuming x -> + `Valid_assuming x + | `Valid -> + (* Since we have stripped the transaction from the result, we reconstruct it here. + The use of [to_valid_unsafe] is justified because a [`Valid] result for this + command means that it has indeed been validated. *) + let (`If_this_is_used_it_should_have_a_comment_justifying_it command_valid) + = + User_command.to_valid_unsafe + (User_command.of_verifiable (With_status.data command)) + in + `Valid command_valid + +let finalize_verification_results tagged_commands tagged_results = + With_id_tag.reassociate_tagged_results tagged_commands tagged_results + |> List.map ~f:reinject_valid_user_command_into_valid_result let verify_commands { worker; logger } ts = O1trace.thread "dispatch_user_command_verification" (fun () -> with_retry ~logger (fun () -> let%bind { connection; _ } = Ivar.read !worker in + let tagged_commands = With_id_tag.tag_list ts in Worker.Connection.run connection ~f:Worker.functions.verify_commands - ~arg:ts - |> Deferred.Or_error.map ~f:(fun rs -> - `Continue - (reinject_valid_user_commands_into_valid_results ts rs) ) ) ) + ~arg:tagged_commands + |> Deferred.Or_error.map ~f:(fun tagged_results -> + let results = + finalize_verification_results tagged_commands tagged_results + in + `Continue results ) ) ) let verify_commands t ts = let logger = t.logger in From 6df9cebaa2b02336be03c6af28de23f7e9239694 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 4 Nov 2023 16:42:25 +0000 Subject: [PATCH 029/119] Batch merkle path database accesses --- src/lib/merkle_ledger/database.ml | 43 ++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 9 deletions(-) diff 
--git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index dc2acab0b2e6..92d41962bf78 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -154,6 +154,16 @@ module Make (Inputs : Inputs_intf) : | None -> empty_hash (Location.height ~ledger_depth:mdb.depth location) + let get_hash_batch mdb locations = + List.iter locations ~f:(fun location -> assert (Location.is_hash location)) ; + let hashes = get_bin_batch mdb locations Hash.bin_read_t in + List.map2_exn locations hashes ~f:(fun location hash -> + match hash with + | Some hash -> + hash + | None -> + empty_hash (Location.height ~ledger_depth:mdb.depth location) ) + let set_raw { kvdb; depth; _ } location bin = Kvdb.set kvdb ~key:(Location.serialize ~ledger_depth:depth location) @@ -684,16 +694,31 @@ module Make (Inputs : Inputs_intf) : else location in assert (Location.is_hash location) ; - let rec loop k = - if Location.height ~ledger_depth:mdb.depth k >= mdb.depth then [] - else - let sibling = Location.sibling k in - let sibling_dir = Location.last_direction (Location.to_path_exn k) in - let hash = get_hash mdb sibling in - Direction.map sibling_dir ~left:(`Left hash) ~right:(`Right hash) - :: loop (Location.parent k) + let rev_locations, rev_directions = + let rec loop k loc_acc dir_acc = + if Location.height ~ledger_depth:mdb.depth k >= mdb.depth then + (loc_acc, dir_acc) + else + let sibling = Location.sibling k in + let sibling_dir = Location.last_direction (Location.to_path_exn k) in + loop (Location.parent k) (sibling :: loc_acc) (sibling_dir :: dir_acc) + in + loop location [] [] + in + let rev_hashes = get_hash_batch mdb rev_locations in + let rec loop directions hashes acc = + match (directions, hashes) with + | [], [] -> + acc + | direction :: directions, hash :: hashes -> + let dir = + Direction.map direction ~left:(`Left hash) ~right:(`Right hash) + in + loop directions hashes (dir :: acc) + | _ -> + failwith "Mismatched lengths" in - 
loop location + loop rev_directions rev_hashes [] let merkle_path_at_addr_exn t addr = merkle_path t (Location.Hash addr) From 300c5473a4599df6bda5343cca080485346056db Mon Sep 17 00:00:00 2001 From: Nathan Holland Date: Tue, 21 Nov 2023 17:45:53 -0600 Subject: [PATCH 030/119] Refactor merkle path db implementation --- src/lib/merkle_ledger/database.ml | 30 +++++--------------------- src/lib/merkle_ledger/location.ml | 14 ++++++++++++ src/lib/merkle_ledger/location_intf.ml | 2 ++ 3 files changed, 21 insertions(+), 25 deletions(-) diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 92d41962bf78..9de0bc7f356d 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -693,32 +693,12 @@ module Make (Inputs : Inputs_intf) : Location.Hash (Location.to_path_exn location) else location in - assert (Location.is_hash location) ; - let rev_locations, rev_directions = - let rec loop k loc_acc dir_acc = - if Location.height ~ledger_depth:mdb.depth k >= mdb.depth then - (loc_acc, dir_acc) - else - let sibling = Location.sibling k in - let sibling_dir = Location.last_direction (Location.to_path_exn k) in - loop (Location.parent k) (sibling :: loc_acc) (sibling_dir :: dir_acc) - in - loop location [] [] - in - let rev_hashes = get_hash_batch mdb rev_locations in - let rec loop directions hashes acc = - match (directions, hashes) with - | [], [] -> - acc - | direction :: directions, hash :: hashes -> - let dir = - Direction.map direction ~left:(`Left hash) ~right:(`Right hash) - in - loop directions hashes (dir :: acc) - | _ -> - failwith "Mismatched lengths" + let dependency_locs, dependency_dirs = + List.unzip (Location.merkle_path_dependencies_exn location) in - loop rev_directions rev_hashes [] + let dependency_hashes = get_hash_batch mdb dependency_locs in + List.map2_exn dependency_dirs dependency_hashes ~f:(fun dir hash -> + Direction.map dir ~left:(`Left hash) ~right:(`Right hash) ) let 
merkle_path_at_addr_exn t addr = merkle_path t (Location.Hash addr) diff --git a/src/lib/merkle_ledger/location.ml b/src/lib/merkle_ledger/location.ml index 0c56b8327b4c..8c2e6105f2bd 100644 --- a/src/lib/merkle_ledger/location.ml +++ b/src/lib/merkle_ledger/location.ml @@ -147,6 +147,20 @@ module T = struct | Right -> (sibling, base) + let merkle_path_dependencies_exn (location : t) : (t * Direction.t) list = + let rec loop k acc = + if Addr.depth k = 0 then acc + else + let sibling = Hash (Addr.sibling k) in + let sibling_dir = last_direction k in + loop (Addr.parent_exn k) ((sibling, sibling_dir) :: acc) + in + match location with + | Hash addr -> + List.rev (loop addr []) + | _ -> + failwith "can only get merkle path dependencies of a hash location" + type location = t [@@deriving sexp, compare] include Comparable.Make (struct diff --git a/src/lib/merkle_ledger/location_intf.ml b/src/lib/merkle_ledger/location_intf.ml index ee0508a00492..15cbe61d40e7 100644 --- a/src/lib/merkle_ledger/location_intf.ml +++ b/src/lib/merkle_ledger/location_intf.ml @@ -48,5 +48,7 @@ module type S = sig val order_siblings : t -> 'a -> 'a -> 'a * 'a + val merkle_path_dependencies_exn : t -> (t * Direction.t) list + include Comparable.S with type t := t end From 3022675aee42475fe74370b8a5f3dfcc2024263f Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Tue, 21 Nov 2023 23:01:47 -0300 Subject: [PATCH 031/119] When comparing `Field.t` values, avoid the conversion to bigint --- src/lib/crypto/kimchi_backend/common/field.ml | 4 +++- src/lib/crypto/kimchi_backend/kimchi_backend.mli | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/lib/crypto/kimchi_backend/common/field.ml b/src/lib/crypto/kimchi_backend/common/field.ml index 91d76aa71ffd..77af0a352b12 100644 --- a/src/lib/crypto/kimchi_backend/common/field.ml +++ b/src/lib/crypto/kimchi_backend/common/field.ml @@ -37,6 +37,8 @@ module type Input_intf = sig val is_square : t -> bool + val compare : t -> t -> int 
+ val equal : t -> t -> bool val print : t -> unit @@ -194,7 +196,7 @@ module Make (F : Input_intf) : let hash = Hash.of_fold hash_fold_t - let compare t1 t2 = Bigint.compare (to_bigint t1) (to_bigint t2) + let compare = compare let equal = equal diff --git a/src/lib/crypto/kimchi_backend/kimchi_backend.mli b/src/lib/crypto/kimchi_backend/kimchi_backend.mli index b2fb42a082bb..eb9baab3a9d8 100644 --- a/src/lib/crypto/kimchi_backend/kimchi_backend.mli +++ b/src/lib/crypto/kimchi_backend/kimchi_backend.mli @@ -11,8 +11,6 @@ module Kimchi_backend_common : sig val sexp_of_t : t -> Sexplib0.Sexp.t - val compare : t -> t -> int - val bin_size_t : t Bin_prot.Size.sizer val bin_write_t : t Bin_prot.Write.writer @@ -56,6 +54,8 @@ module Kimchi_backend_common : sig val is_square : t -> bool + val compare : t -> t -> int + val equal : t -> t -> bool val print : t -> unit From 5651bced7ab425022727ebefdd43bd5d609a272a Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Wed, 22 Nov 2023 12:46:35 -0300 Subject: [PATCH 032/119] Use `into_repr()` when comparing fields in Rust to ensure compatibility with protocol and o1js --- src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fp.rs | 4 ++-- src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fq.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fp.rs b/src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fp.rs index 3d9144f13033..08264921088f 100644 --- a/src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fp.rs +++ b/src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fp.rs @@ -31,7 +31,7 @@ impl CamlFp { unsafe extern "C" fn ocaml_compare(x: ocaml::Raw, y: ocaml::Raw) -> i32 { let x = x.as_pointer::(); let y = y.as_pointer::(); - match x.as_ref().0.cmp(&y.as_ref().0) { + match x.as_ref().0.into_repr().cmp(&y.as_ref().0.into_repr()) { core::cmp::Ordering::Less => -1, core::cmp::Ordering::Equal => 0, core::cmp::Ordering::Greater => 
1, @@ -240,7 +240,7 @@ pub fn caml_pasta_fp_mut_square(mut x: ocaml::Pointer) { #[ocaml_gen::func] #[ocaml::func] pub fn caml_pasta_fp_compare(x: ocaml::Pointer, y: ocaml::Pointer) -> ocaml::Int { - match x.as_ref().0.cmp(&y.as_ref().0) { + match x.as_ref().0.into_repr().cmp(&y.as_ref().0.into_repr()) { Less => -1, Equal => 0, Greater => 1, diff --git a/src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fq.rs b/src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fq.rs index 8fbca9da595b..bc81f5962a67 100644 --- a/src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fq.rs +++ b/src/lib/crypto/kimchi_bindings/stubs/src/arkworks/pasta_fq.rs @@ -36,7 +36,7 @@ impl CamlFq { unsafe extern "C" fn ocaml_compare(x: ocaml::Raw, y: ocaml::Raw) -> i32 { let x = x.as_pointer::(); let y = y.as_pointer::(); - match x.as_ref().0.cmp(&y.as_ref().0) { + match x.as_ref().0.into_repr().cmp(&y.as_ref().0.into_repr()) { core::cmp::Ordering::Less => -1, core::cmp::Ordering::Equal => 0, core::cmp::Ordering::Greater => 1, @@ -241,7 +241,7 @@ pub fn caml_pasta_fq_mut_square(mut x: ocaml::Pointer) { #[ocaml_gen::func] #[ocaml::func] pub fn caml_pasta_fq_compare(x: ocaml::Pointer, y: ocaml::Pointer) -> ocaml::Int { - match x.as_ref().0.cmp(&y.as_ref().0) { + match x.as_ref().0.into_repr().cmp(&y.as_ref().0.into_repr()) { Less => -1, Equal => 0, Greater => 1, From 6fcd9395c5643bad6e799233b96929633d8d88fc Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Wed, 22 Nov 2023 18:05:14 -0300 Subject: [PATCH 033/119] Field: More efficient conversion of into Bignum_bigint.t --- src/lib/crypto/kimchi_backend/common/field.ml | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/src/lib/crypto/kimchi_backend/common/field.ml b/src/lib/crypto/kimchi_backend/common/field.ml index 77af0a352b12..173bdc7feb15 100644 --- a/src/lib/crypto/kimchi_backend/common/field.ml +++ b/src/lib/crypto/kimchi_backend/common/field.ml @@ -179,20 +179,30 @@ module Make (F : 
Input_intf) : let of_sexpable = of_bigint end) + let zero = of_int 0 + let to_bignum_bigint n = - let rec go i two_to_the_i acc = - if Int.equal i size_in_bits then acc - else - let acc' = - if Bigint.test_bit n i then Bignum_bigint.(acc + two_to_the_i) - else acc - in - go (i + 1) Bignum_bigint.(two_to_the_i + two_to_the_i) acc' - in - go 0 Bignum_bigint.one Bignum_bigint.zero - - let hash_fold_t s x = - Bignum_bigint.hash_fold_t s (to_bignum_bigint (to_bigint x)) + (* For non-zero values, conversion is done by creating the bin_prot representation + of the [Bignum_bigit.t] value, and then parsing it with bin_prot. *) + match compare n zero with + | 0 -> + Bignum_bigint.zero + | c -> + (* Tag for positive values is 1, negative is 2: + https://github.com/janestreet/bignum/blob/6c63419787a4e209e85befd3d823fff2790677e0/bigint/src/bigint.ml#L27-L30 *) + let tag_byte = if c > 0 then '\x01' else '\x02' in + let bytes = to_bytes n in + let len = Bytes.length bytes in + let size_byte = Char.of_int_exn len in + let buf = Bytes.create (2 + len) in + (* First byte is the tag, second the amount of bytes *) + Bytes.unsafe_set buf 0 tag_byte ; + Bytes.unsafe_set buf 1 size_byte ; + (* Copy the bytes representation of the value, skip the tag and size bytes *) + Bytes.unsafe_blit ~src:bytes ~src_pos:0 ~dst_pos:2 ~len ~dst:buf ; + Bin_prot.Reader.of_bytes Bignum_bigint.Stable.V1.bin_reader_t buf + + let hash_fold_t s x = Bignum_bigint.hash_fold_t s (to_bignum_bigint x) let hash = Hash.of_fold hash_fold_t From b570c8f03b73e032a45ceb5bffb3e848bd4e0160 Mon Sep 17 00:00:00 2001 From: Bruno Deferrari Date: Mon, 27 Nov 2023 19:06:16 -0300 Subject: [PATCH 034/119] Do not use binprot serialization when converting bigint fields, use Zarith directly --- src/lib/crypto/kimchi_backend/common/dune | 1 + src/lib/crypto/kimchi_backend/common/field.ml | 32 ++++++------------- 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/src/lib/crypto/kimchi_backend/common/dune 
b/src/lib/crypto/kimchi_backend/common/dune index 3d75a75b7ca5..9a7cb8b8c919 100644 --- a/src/lib/crypto/kimchi_backend/common/dune +++ b/src/lib/crypto/kimchi_backend/common/dune @@ -25,6 +25,7 @@ base.caml ppx_inline_test.config bignum.bigint + zarith base.base_internalhash_types ;; local libraries tuple_lib diff --git a/src/lib/crypto/kimchi_backend/common/field.ml b/src/lib/crypto/kimchi_backend/common/field.ml index 173bdc7feb15..12261d7b9f52 100644 --- a/src/lib/crypto/kimchi_backend/common/field.ml +++ b/src/lib/crypto/kimchi_backend/common/field.ml @@ -179,28 +179,16 @@ module Make (F : Input_intf) : let of_sexpable = of_bigint end) - let zero = of_int 0 - - let to_bignum_bigint n = - (* For non-zero values, conversion is done by creating the bin_prot representation - of the [Bignum_bigit.t] value, and then parsing it with bin_prot. *) - match compare n zero with - | 0 -> - Bignum_bigint.zero - | c -> - (* Tag for positive values is 1, negative is 2: - https://github.com/janestreet/bignum/blob/6c63419787a4e209e85befd3d823fff2790677e0/bigint/src/bigint.ml#L27-L30 *) - let tag_byte = if c > 0 then '\x01' else '\x02' in - let bytes = to_bytes n in - let len = Bytes.length bytes in - let size_byte = Char.of_int_exn len in - let buf = Bytes.create (2 + len) in - (* First byte is the tag, second the amount of bytes *) - Bytes.unsafe_set buf 0 tag_byte ; - Bytes.unsafe_set buf 1 size_byte ; - (* Copy the bytes representation of the value, skip the tag and size bytes *) - Bytes.unsafe_blit ~src:bytes ~src_pos:0 ~dst_pos:2 ~len ~dst:buf ; - Bin_prot.Reader.of_bytes Bignum_bigint.Stable.V1.bin_reader_t buf + let to_bignum_bigint = + let zero = of_int 0 in + let one = of_int 1 in + fun n -> + if equal n zero then Bignum_bigint.zero + else if equal n one then Bignum_bigint.one + else + Bytes.unsafe_to_string + ~no_mutation_while_string_reachable:(to_bytes n) + |> Z.of_bits |> Bignum_bigint.of_zarith_bigint let hash_fold_t s x = Bignum_bigint.hash_fold_t s 
(to_bignum_bigint x) From da5379243c2b85fde2809283fa57ebcd90ef46eb Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 6 Nov 2023 14:57:02 +0000 Subject: [PATCH 035/119] Batch account location lookups for sparse ledger --- src/lib/mina_ledger/sparse_ledger.ml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 304a163dffca..2e9271f0decd 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -8,10 +8,11 @@ let of_ledger_root ledger = let of_ledger_subset_exn (oledger : Ledger.t) keys = let ledger = Ledger.copy oledger in + let locations = Ledger.location_of_account_batch ledger keys in let _, sparse = - List.fold keys - ~f:(fun (new_keys, sl) key -> - match Ledger.location_of_account ledger key with + List.fold locations + ~f:(fun (new_keys, sl) (key, loc) -> + match loc with | Some loc -> ( new_keys , add_path sl From a96b86b070727831942c6aeb9a61e66d0e0cbfd0 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Wed, 8 Nov 2023 15:39:53 +0000 Subject: [PATCH 036/119] Fixup Masking_merkle_tree.location_of_account_batch --- src/lib/merkle_mask/masking_merkle_tree.ml | 46 +++++++++++++++++----- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index f24ef206a702..4aa7dd5d8203 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -535,16 +535,44 @@ module Make (Inputs : Inputs_intf.S) = struct let location_of_account_batch t account_ids = assert_is_attached t ; - let found_locations, leftover_account_ids = - List.partition_map account_ids ~f:(fun account_id -> - match self_find_location t account_id with - | Some location -> - Either.first (account_id, Some location) - | None -> - Either.second account_id ) + let account_ids_with_locations_rev, leftover_account_ids_rev = + let rec go 
account_ids account_ids_with_locations_rev + leftover_account_ids_rev = + match account_ids with + | [] -> + (account_ids_with_locations_rev, leftover_account_ids_rev) + | account_id :: account_ids -> ( + match self_find_location t account_id with + | None -> + go account_ids + ((account_id, None) :: account_ids_with_locations_rev) + (account_id :: leftover_account_ids_rev) + | Some loc -> + go account_ids + ((account_id, Some loc) :: account_ids_with_locations_rev) + leftover_account_ids_rev ) + in + go account_ids [] [] + in + let leftover_account_id_locs_rev = + Base.location_of_account_batch (get_parent t) leftover_account_ids_rev + in + let rec go account_ids_with_locations_rev leftover_account_ids_rev locs = + match (account_ids_with_locations_rev, leftover_account_ids_rev) with + | [], _ -> + locs + | ( (account_id, None) :: account_ids_with_locations_rev + , (_account_id, loc) :: leftover_account_ids_rev ) -> + go account_ids_with_locations_rev leftover_account_ids_rev + ((account_id, loc) :: locs) + | ( (account_id, Some loc) :: account_ids_with_locations_rev + , leftover_account_ids_rev ) -> + go account_ids_with_locations_rev leftover_account_ids_rev + ((account_id, Some loc) :: locs) + | _ :: _, [] -> + assert false in - found_locations - @ Base.location_of_account_batch (get_parent t) leftover_account_ids + go account_ids_with_locations_rev leftover_account_id_locs_rev [] (* not needed for in-memory mask; in the database, it's currently a NOP *) let make_space_for t = From d10bf7edea5f33d525e5756a5244801e92876d2b Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 8 Nov 2023 17:22:25 +0100 Subject: [PATCH 037/119] Fixup batching operations in masking ledger --- src/lib/merkle_mask/masking_merkle_tree.ml | 77 ++++++++-------------- 1 file changed, 28 insertions(+), 49 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 4aa7dd5d8203..5dd06f6026c2 100644 --- 
a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -183,17 +183,33 @@ module Make (Inputs : Inputs_intf.S) = struct | None -> Base.get (get_parent t) location - let get_batch t locations = + let self_find_or_batch_lookup self_find lookup_parent t ids = assert_is_attached t ; - let found_accounts, leftover_locations = - List.partition_map locations ~f:(fun location -> - match self_find_account t location with - | Some account -> - Either.first (location, Some account) - | None -> - Either.second location ) + let self_found_or_none = + List.map ids ~f:(fun id -> (id, self_find t id)) + in + let not_found = + List.filter_map self_found_or_none ~f:(function + | id, None -> + Some id + | _ -> + None ) in - found_accounts @ Base.get_batch (get_parent t) leftover_locations + let from_parent = lookup_parent (get_parent t) not_found in + let _, res = + List.fold_map self_found_or_none ~init:from_parent + ~f:(fun from_parent (id, self_found) -> + match (self_found, from_parent) with + | None, r :: rest -> + (rest, r) + | Some _, _ -> + (from_parent, (id, self_found)) + | _ -> + failwith "unexpected number of results from DB" ) + in + res + + let get_batch = self_find_or_batch_lookup self_find_account Base.get_batch (* fixup_merkle_path patches a Merkle path reported by the parent, overriding with hashes which are stored in the mask *) @@ -533,46 +549,9 @@ module Make (Inputs : Inputs_intf.S) = struct | None -> Base.location_of_account (get_parent t) account_id - let location_of_account_batch t account_ids = - assert_is_attached t ; - let account_ids_with_locations_rev, leftover_account_ids_rev = - let rec go account_ids account_ids_with_locations_rev - leftover_account_ids_rev = - match account_ids with - | [] -> - (account_ids_with_locations_rev, leftover_account_ids_rev) - | account_id :: account_ids -> ( - match self_find_location t account_id with - | None -> - go account_ids - ((account_id, None) :: 
account_ids_with_locations_rev) - (account_id :: leftover_account_ids_rev) - | Some loc -> - go account_ids - ((account_id, Some loc) :: account_ids_with_locations_rev) - leftover_account_ids_rev ) - in - go account_ids [] [] - in - let leftover_account_id_locs_rev = - Base.location_of_account_batch (get_parent t) leftover_account_ids_rev - in - let rec go account_ids_with_locations_rev leftover_account_ids_rev locs = - match (account_ids_with_locations_rev, leftover_account_ids_rev) with - | [], _ -> - locs - | ( (account_id, None) :: account_ids_with_locations_rev - , (_account_id, loc) :: leftover_account_ids_rev ) -> - go account_ids_with_locations_rev leftover_account_ids_rev - ((account_id, loc) :: locs) - | ( (account_id, Some loc) :: account_ids_with_locations_rev - , leftover_account_ids_rev ) -> - go account_ids_with_locations_rev leftover_account_ids_rev - ((account_id, Some loc) :: locs) - | _ :: _, [] -> - assert false - in - go account_ids_with_locations_rev leftover_account_id_locs_rev [] + let location_of_account_batch = + self_find_or_batch_lookup self_find_location + Base.location_of_account_batch (* not needed for in-memory mask; in the database, it's currently a NOP *) let make_space_for t = From f8c70177747c0d21d1473365e17ff9017e0ac1ce Mon Sep 17 00:00:00 2001 From: georgeee Date: Tue, 28 Nov 2023 18:17:31 +0100 Subject: [PATCH 038/119] Rename variable in merkle_path_dependencies_exn --- src/lib/merkle_ledger/location.ml | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/lib/merkle_ledger/location.ml b/src/lib/merkle_ledger/location.ml index 8c2e6105f2bd..e19fc5f6f14f 100644 --- a/src/lib/merkle_ledger/location.ml +++ b/src/lib/merkle_ledger/location.ml @@ -147,17 +147,23 @@ module T = struct | Right -> (sibling, base) + (* Returns a reverse of traversal path from top of the tree to the location + (direction to take and sibling's hash).contents + + By reverse it means that head of returned list contains 
direction from + location's parent to the location along with the location's sibling. + *) let merkle_path_dependencies_exn (location : t) : (t * Direction.t) list = - let rec loop k acc = - if Addr.depth k = 0 then acc + let rec loop k = + if Addr.depth k = 0 then [] else let sibling = Hash (Addr.sibling k) in - let sibling_dir = last_direction k in - loop (Addr.parent_exn k) ((sibling, sibling_dir) :: acc) + let dir = last_direction k in + (sibling, dir) :: loop (Addr.parent_exn k) in match location with | Hash addr -> - List.rev (loop addr []) + loop addr | _ -> failwith "can only get merkle path dependencies of a hash location" From 85cf55c35d84aa20a1deeb1bba52be5db7185281 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 7 Nov 2023 11:45:53 +0000 Subject: [PATCH 039/119] Batch account lookups --- src/lib/mina_ledger/sparse_ledger.ml | 39 ++++++++++++++++++---------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 2e9271f0decd..4e3c68171e4f 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -9,21 +9,32 @@ let of_ledger_root ledger = let of_ledger_subset_exn (oledger : Ledger.t) keys = let ledger = Ledger.copy oledger in let locations = Ledger.location_of_account_batch ledger keys in + let non_empty_locations = List.filter_map ~f:snd locations in + let accounts = Ledger.get_batch ledger non_empty_locations in let _, sparse = - List.fold locations - ~f:(fun (new_keys, sl) (key, loc) -> - match loc with - | Some loc -> - ( new_keys - , add_path sl - (Ledger.merkle_path ledger loc) - key - ( Ledger.get ledger loc - |> Option.value_exn ?here:None ?error:None ?message:None ) ) - | None -> - let path, acct = Ledger.create_empty_exn ledger key in - (key :: new_keys, add_path sl path key acct) ) - ~init:([], of_ledger_root ledger) + let rec go (new_keys, sl) locations accounts = + match locations with + | [] -> + (new_keys, sl) + 
| (key, Some loc) :: locations -> ( + match accounts with + | (_, account) :: accounts -> + go + ( new_keys + , add_path sl + (Ledger.merkle_path ledger loc) + key + ( account + |> Option.value_exn ?here:None ?error:None ?message:None ) + ) + locations accounts + | _ -> + assert false ) + | (key, None) :: locations -> + let path, acct = Ledger.create_empty_exn ledger key in + go (key :: new_keys, add_path sl path key acct) locations accounts + in + go ([], of_ledger_root ledger) locations accounts in Debug_assert.debug_assert (fun () -> [%test_eq: Ledger_hash.t] From 4271394fe8e90c3ec022852501f19b095b3934a4 Mon Sep 17 00:00:00 2001 From: Nathan Holland Date: Tue, 21 Nov 2023 18:24:02 -0600 Subject: [PATCH 040/119] Refactor sparse ledger of_ledger_subset_exn --- src/lib/mina_ledger/sparse_ledger.ml | 46 +++++++++++++--------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 4e3c68171e4f..af1fa2e8ba5a 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -11,36 +11,32 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = let locations = Ledger.location_of_account_batch ledger keys in let non_empty_locations = List.filter_map ~f:snd locations in let accounts = Ledger.get_batch ledger non_empty_locations in - let _, sparse = - let rec go (new_keys, sl) locations accounts = - match locations with - | [] -> - (new_keys, sl) - | (key, Some loc) :: locations -> ( - match accounts with - | (_, account) :: accounts -> - go - ( new_keys - , add_path sl + let sl, _ = + List.fold locations + ~init:(of_ledger_root ledger, accounts) + ~f:(fun (sl, accounts) (key, location) -> + match location with + | Some loc -> ( + match accounts with + | (_, account) :: rest -> + let sl = + add_path sl (Ledger.merkle_path ledger loc) - key - ( account - |> Option.value_exn ?here:None ?error:None ?message:None ) - ) - locations accounts - | _ -> - 
assert false ) - | (key, None) :: locations -> - let path, acct = Ledger.create_empty_exn ledger key in - go (key :: new_keys, add_path sl path key acct) locations accounts - in - go ([], of_ledger_root ledger) locations accounts + key (Option.value_exn account) + in + (sl, rest) + | [] -> + failwith "unexpected number of non empty accounts" ) + | None -> + let path, account = Ledger.create_empty_exn ledger key in + let sl = add_path sl path key account in + (sl, accounts) ) in Debug_assert.debug_assert (fun () -> [%test_eq: Ledger_hash.t] (Ledger.merkle_root ledger) - ((merkle_root sparse :> Random_oracle.Digest.t) |> Ledger_hash.of_hash) ) ; - sparse + ((merkle_root sl :> Random_oracle.Digest.t) |> Ledger_hash.of_hash) ) ; + sl let of_ledger_index_subset_exn (ledger : Ledger.Any_ledger.witness) indexes = List.fold indexes From fc58d3c4cc087bf1f000875637c5abdc24ba948b Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 7 Nov 2023 13:17:14 +0000 Subject: [PATCH 041/119] Add merkle_path_batch --- src/lib/merkle_ledger/any_ledger.ml | 2 + src/lib/merkle_ledger/base_ledger_intf.ml | 2 + src/lib/merkle_ledger/database.ml | 52 ++++++++++++++++++++++ src/lib/merkle_ledger/null_ledger.ml | 2 + src/lib/merkle_mask/masking_merkle_tree.ml | 10 +++++ 5 files changed, 68 insertions(+) diff --git a/src/lib/merkle_ledger/any_ledger.ml b/src/lib/merkle_ledger/any_ledger.ml index 6ead07061508..656cf6a2f8de 100644 --- a/src/lib/merkle_ledger/any_ledger.ml +++ b/src/lib/merkle_ledger/any_ledger.ml @@ -120,6 +120,8 @@ module Make_base (Inputs : Inputs_intf) : let merkle_path (T ((module Base), t)) = Base.merkle_path t + let merkle_path_batch (T ((module Base), t)) = Base.merkle_path_batch t + let merkle_root (T ((module Base), t)) = Base.merkle_root t let index_of_account_exn (T ((module Base), t)) = diff --git a/src/lib/merkle_ledger/base_ledger_intf.ml b/src/lib/merkle_ledger/base_ledger_intf.ml index a656ff4ffca7..64f4a9348265 100644 --- a/src/lib/merkle_ledger/base_ledger_intf.ml 
+++ b/src/lib/merkle_ledger/base_ledger_intf.ml @@ -134,6 +134,8 @@ module type S = sig val merkle_path_at_index_exn : t -> int -> Path.t + val merkle_path_batch : t -> Location.t list -> Path.t list + val remove_accounts_exn : t -> account_id list -> unit (** Triggers when the ledger has been detached and should no longer be diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 9de0bc7f356d..691af913143f 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -700,6 +700,58 @@ module Make (Inputs : Inputs_intf) : List.map2_exn dependency_dirs dependency_hashes ~f:(fun dir hash -> Direction.map dir ~left:(`Left hash) ~right:(`Right hash) ) + let merkle_path_batch mdb locations = + let locations = + List.map locations ~f:(fun location -> + if Location.is_account location then + Location.Hash (Location.to_path_exn location) + else ( + assert (Location.is_hash location) ; + location ) ) + in + let rev_locations, rev_directions, rev_lengths = + let rec loop locations loc_acc dir_acc length_acc = + match (locations, length_acc) with + | [], _ :: length_acc -> + (loc_acc, dir_acc, length_acc) + | k :: locations, length :: length_acc -> + if Location.height ~ledger_depth:mdb.depth k >= mdb.depth then + loop locations loc_acc dir_acc (0 :: length :: length_acc) + else + let sibling = Location.sibling k in + let sibling_dir = + Location.last_direction (Location.to_path_exn k) + in + loop + (Location.parent k :: locations) + (sibling :: loc_acc) (sibling_dir :: dir_acc) + ((length + 1) :: length_acc) + | _ -> + assert false + in + loop locations [] [] [ 0 ] + in + let rev_hashes = get_hash_batch mdb rev_locations in + let rec loop directions hashes lengths acc = + match (directions, hashes, lengths, acc) with + | [], [], [], _ (* actually [] *) :: acc_tl -> + acc_tl + | _, _, 0 :: lengths, _ -> + loop directions hashes lengths ([] :: acc) + | ( direction :: directions + , hash :: hashes + , length :: 
lengths + , acc_hd :: acc_tl ) -> + let dir = + Direction.map direction ~left:(`Left hash) ~right:(`Right hash) + in + loop directions hashes ((length - 1) :: lengths) + ((dir :: acc_hd) :: acc_tl) + | _ -> + failwith "Mismatched lengths" + in + loop rev_directions rev_hashes rev_lengths [ [] ] + let merkle_path_at_addr_exn t addr = merkle_path t (Location.Hash addr) let merkle_path_at_index_exn t index = diff --git a/src/lib/merkle_ledger/null_ledger.ml b/src/lib/merkle_ledger/null_ledger.ml index cfc0cf953da2..2d67ffdfa359 100644 --- a/src/lib/merkle_ledger/null_ledger.ml +++ b/src/lib/merkle_ledger/null_ledger.ml @@ -64,6 +64,8 @@ end = struct in loop location + let merkle_path_batch t locations = List.map ~f:(merkle_path t) locations + let merkle_root t = empty_hash_at_height t.depth let merkle_path_at_addr_exn t addr = merkle_path t (Location.Hash addr) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 5dd06f6026c2..3bebe4c1352a 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -260,6 +260,16 @@ module Make (Inputs : Inputs_intf.S) = struct let parent_merkle_path = Base.merkle_path (get_parent t) location in fixup_merkle_path t parent_merkle_path address + let merkle_path_batch t locations = + assert_is_attached t ; + let addresses = List.map ~f:Location.to_path_exn locations in + let parent_merkle_paths = + Base.merkle_path_batch (get_parent t) locations + in + List.map2_exn + ~f:(fun path address -> fixup_merkle_path t path address) + parent_merkle_paths addresses + (* given a Merkle path corresponding to a starting address, calculate addresses and hashes for each node affected by the starting hash; that is, along the path from the account address to root *) From 5147f719d559ef346ef55da5d3d80794f6bca795 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 7 Nov 2023 13:22:36 +0000 Subject: [PATCH 042/119] Batch merkle_path lookups in Sparse_ledger 
--- src/lib/mina_ledger/sparse_ledger.ml | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index af1fa2e8ba5a..10e1f5a1e783 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -11,26 +11,25 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = let locations = Ledger.location_of_account_batch ledger keys in let non_empty_locations = List.filter_map ~f:snd locations in let accounts = Ledger.get_batch ledger non_empty_locations in - let sl, _ = + let merkle_paths = Ledger.merkle_path_batch ledger non_empty_locations in + let sl, _, _ = List.fold locations - ~init:(of_ledger_root ledger, accounts) - ~f:(fun (sl, accounts) (key, location) -> + ~init:(of_ledger_root ledger, accounts, merkle_paths) + ~f:(fun (sl, accounts, merkle_paths) (key, location) -> match location with - | Some loc -> ( - match accounts with - | (_, account) :: rest -> + | Some _loc -> ( + match (accounts, merkle_paths) with + | (_, account) :: rest, merkle_path :: rest_merkle_paths -> let sl = - add_path sl - (Ledger.merkle_path ledger loc) - key (Option.value_exn account) + add_path sl merkle_path key (Option.value_exn account) in - (sl, rest) - | [] -> + (sl, rest, rest_merkle_paths) + | _ -> failwith "unexpected number of non empty accounts" ) | None -> let path, account = Ledger.create_empty_exn ledger key in let sl = add_path sl path key account in - (sl, accounts) ) + (sl, accounts, merkle_paths) ) in Debug_assert.debug_assert (fun () -> [%test_eq: Ledger_hash.t] From 0a01bd3e585c32784157157263ae89c799b51de5 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Thu, 16 Nov 2023 21:36:47 +0000 Subject: [PATCH 043/119] Return the merkle path from the current mask instead of from the ledger --- src/lib/merkle_mask/masking_merkle_tree.ml | 101 ++++++++++++++++----- 1 file changed, 80 insertions(+), 21 deletions(-) diff --git 
a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 3bebe4c1352a..4f9afacdaaa8 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -211,9 +211,43 @@ module Make (Inputs : Inputs_intf.S) = struct let get_batch = self_find_or_batch_lookup self_find_account Base.get_batch + let self_merkle_path t address = + let location = Location.Account address in + match self_find_account t location with + | None -> + (* Short-circuit: We assume that the merkle path will be in this mask + only if the account is also. + *) + None + | Some _ -> + Option.try_with (fun () -> + let rec self_merkle_path address = + if Addr.height ~ledger_depth:t.depth address >= t.depth then [] + else + let sibling = Addr.sibling address in + let sibling_dir = Location.last_direction sibling in + let hash = + match self_find_hash t sibling with + | Some hash -> + hash + | None -> + (* Caught by [try_with] above. *) assert false + in + let parent_address = + match Addr.parent address with + | Ok addr -> + addr + | Error _ -> + (* Caught by [try_with] above. 
*) assert false + in + Direction.map sibling_dir ~left:(`Left hash) + ~right:(`Right hash) + :: self_merkle_path parent_address + in + self_merkle_path address ) + (* fixup_merkle_path patches a Merkle path reported by the parent, overriding with hashes which are stored in the mask *) - let fixup_merkle_path t path address = let rec build_fixed_path path address accum = if List.is_empty path then List.rev accum @@ -241,34 +275,59 @@ module Make (Inputs : Inputs_intf.S) = struct let merkle_path_at_addr_exn t address = assert_is_attached t ; - let parent_merkle_path = - Base.merkle_path_at_addr_exn (get_parent t) address - in - fixup_merkle_path t parent_merkle_path address + match self_merkle_path t address with + | Some path -> + path + | None -> + let parent_merkle_path = + Base.merkle_path_at_addr_exn (get_parent t) address + in + fixup_merkle_path t parent_merkle_path address let merkle_path_at_index_exn t index = - assert_is_attached t ; - let address = Addr.of_int_exn ~ledger_depth:t.depth index in - let parent_merkle_path = - Base.merkle_path_at_addr_exn (get_parent t) address - in - fixup_merkle_path t parent_merkle_path address + merkle_path_at_addr_exn t (Addr.of_int_exn ~ledger_depth:t.depth index) let merkle_path t location = - assert_is_attached t ; - let address = Location.to_path_exn location in - let parent_merkle_path = Base.merkle_path (get_parent t) location in - fixup_merkle_path t parent_merkle_path address + merkle_path_at_addr_exn t (Location.to_path_exn location) let merkle_path_batch t locations = assert_is_attached t ; - let addresses = List.map ~f:Location.to_path_exn locations in - let parent_merkle_paths = - Base.merkle_path_batch (get_parent t) locations + let self_merkle_paths_rev = + List.rev_map locations ~f:(fun location -> + let address = Location.to_path_exn location in + match self_merkle_path t address with + | Some path -> + Either.First path + | None -> + Either.Second (location, address) ) + in + let parent_merkle_paths_rev = 
+ let parent_locations_rev = + List.filter_map self_merkle_paths_rev ~f:(function + | Either.First _ -> + None + | Either.Second (location, _) -> + Some location ) + in + if List.is_empty parent_locations_rev then [] + else Base.merkle_path_batch (get_parent t) parent_locations_rev + in + let rec recombine self_merkle_paths_rev parent_merkle_paths_rev acc = + match (self_merkle_paths_rev, parent_merkle_paths_rev) with + | [], [] -> + acc + | Either.First path :: self_merkle_paths_rev, parent_merkle_paths_rev -> + recombine self_merkle_paths_rev parent_merkle_paths_rev (path :: acc) + | ( Either.Second (_, address) :: self_merkle_paths_rev + , path :: parent_merkle_paths_rev ) -> + let path = fixup_merkle_path t path address in + recombine self_merkle_paths_rev parent_merkle_paths_rev (path :: acc) + | _ :: _, [] -> + assert false + | [], _ :: _ -> + assert false in - List.map2_exn - ~f:(fun path address -> fixup_merkle_path t path address) - parent_merkle_paths addresses + recombine self_merkle_paths_rev parent_merkle_paths_rev [] (* given a Merkle path corresponding to a starting address, calculate addresses and hashes for each node affected by the starting hash; that is, From 792b6946588bdfed2a25d9615ef1d276d5bedaaa Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 17 Nov 2023 09:35:39 +0000 Subject: [PATCH 044/119] Always attempt to get the merkle path from the ledger mask --- src/lib/merkle_mask/masking_merkle_tree.ml | 55 +++++++++------------- 1 file changed, 23 insertions(+), 32 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 4f9afacdaaa8..bcf52bd5b54d 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -212,39 +212,30 @@ module Make (Inputs : Inputs_intf.S) = struct let get_batch = self_find_or_batch_lookup self_find_account Base.get_batch let self_merkle_path t address = - let location = Location.Account address in - match 
self_find_account t location with - | None -> - (* Short-circuit: We assume that the merkle path will be in this mask - only if the account is also. - *) - None - | Some _ -> - Option.try_with (fun () -> - let rec self_merkle_path address = - if Addr.height ~ledger_depth:t.depth address >= t.depth then [] - else - let sibling = Addr.sibling address in - let sibling_dir = Location.last_direction sibling in - let hash = - match self_find_hash t sibling with - | Some hash -> - hash - | None -> - (* Caught by [try_with] above. *) assert false - in - let parent_address = - match Addr.parent address with - | Ok addr -> - addr - | Error _ -> - (* Caught by [try_with] above. *) assert false - in - Direction.map sibling_dir ~left:(`Left hash) - ~right:(`Right hash) - :: self_merkle_path parent_address + Option.try_with (fun () -> + let rec self_merkle_path address = + if Addr.height ~ledger_depth:t.depth address >= t.depth then [] + else + let sibling = Addr.sibling address in + let sibling_dir = Location.last_direction sibling in + let hash = + match self_find_hash t sibling with + | Some hash -> + hash + | None -> + (* Caught by [try_with] above. *) assert false in - self_merkle_path address ) + let parent_address = + match Addr.parent address with + | Ok addr -> + addr + | Error _ -> + (* Caught by [try_with] above. 
*) assert false + in + Direction.map sibling_dir ~left:(`Left hash) ~right:(`Right hash) + :: self_merkle_path parent_address + in + self_merkle_path address ) (* fixup_merkle_path patches a Merkle path reported by the parent, overriding with hashes which are stored in the mask *) From a636e022f6ed468b4d43a7ce64cecfaa08713695 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 17 Nov 2023 11:18:49 +0000 Subject: [PATCH 045/119] Update doc for copy method of masking ledger --- src/lib/merkle_mask/masking_merkle_tree_intf.ml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree_intf.ml b/src/lib/merkle_mask/masking_merkle_tree_intf.ml index 84bc6badfd53..e5ca27b99586 100644 --- a/src/lib/merkle_mask/masking_merkle_tree_intf.ml +++ b/src/lib/merkle_mask/masking_merkle_tree_intf.ml @@ -73,9 +73,8 @@ module type S = sig (** called when parent sets an account; update local state *) val parent_set_notify : t -> account -> unit - val copy : t -> t - (* makes new mask instance with copied tables, re-use parent *) + val copy : t -> t (** already have module For_testing from include above *) module For_testing : sig From 93ec85bef0390a89cab02b23c8ccb081eb302340 Mon Sep 17 00:00:00 2001 From: georgeee Date: Tue, 28 Nov 2023 21:29:00 +0100 Subject: [PATCH 046/119] Fixup for self_merkle_path --- src/lib/merkle_mask/masking_merkle_tree.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index bcf52bd5b54d..c91042695f34 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -217,7 +217,7 @@ module Make (Inputs : Inputs_intf.S) = struct if Addr.height ~ledger_depth:t.depth address >= t.depth then [] else let sibling = Addr.sibling address in - let sibling_dir = Location.last_direction sibling in + let sibling_dir = Location.last_direction address in let hash = match 
self_find_hash t sibling with | Some hash -> From d4442c4ee75ede2644055b3bd3539ed5ebcc20ff Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 17 Nov 2023 09:40:51 +0000 Subject: [PATCH 047/119] Add helper to preload accounts and merkle paths in masks --- src/lib/merkle_mask/masking_merkle_tree.ml | 38 ++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index c91042695f34..deabeae8cdd2 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -402,6 +402,17 @@ module Make (Inputs : Inputs_intf.S) = struct List.iter addresses_and_hashes ~f:(fun (addr, hash) -> self_set_hash t addr hash ) + let set_merkle_path_unsafe t addr path = + assert_is_attached t ; + ignore + ( List.fold_left ~init:addr (List.rev path) ~f:(fun addr path -> + let addr = Location.Addr.parent_exn addr in + let sibling_addr = Location.Addr.sibling addr in + let hash = match path with `Left hash | `Right hash -> hash in + self_set_hash t sibling_addr hash ; + addr ) + : Location.Addr.t ) + (* if the mask's parent sets an account, we can prune an entry in the mask if the account in the parent is the same in the mask *) let parent_set_notify t account = @@ -613,6 +624,33 @@ module Make (Inputs : Inputs_intf.S) = struct self_find_or_batch_lookup self_find_location Base.location_of_account_batch + let unsafe_preload_accounts_from_parent t account_ids = + assert_is_attached t ; + let locations = + Base.location_of_account_batch (get_parent t) account_ids + in + let non_empty_locations = + List.filter_map locations ~f:(fun (_account_id, location) -> location) + in + let accounts = Base.get_batch (get_parent t) non_empty_locations in + let merkle_paths = + Base.merkle_path_batch (get_parent t) non_empty_locations + in + (* TODO: If we also insert the empty merkle paths corresponding that may + be used by the unmatched account IDs, we can avoid any 
further disk IO + when accessing this mask. + *) + List.iter2_exn non_empty_locations merkle_paths ~f:(fun location merkle_path -> + let addr = Location.to_path_exn location in + set_merkle_path_unsafe t addr merkle_path ) ; + List.iter accounts ~f:(fun (location, account) -> + match account with + | None -> + () + | Some account -> + set t location account ) + (* not needed for in-memory mask; in the database, it's currently a NOP *) let make_space_for t = assert_is_attached t ; From 8961e65988bbbbb0c285d2f19f0175e104f3f1be Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 17 Nov 2023 11:19:24 +0000 Subject: [PATCH 048/119] Fixup set_merkle_path_unsafe --- src/lib/merkle_mask/masking_merkle_tree.ml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index deabeae8cdd2..028bf0715c12 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -405,9 +405,9 @@ module Make (Inputs : Inputs_intf.S) = struct let set_merkle_path_unsafe t addr path = assert_is_attached t ; ignore - ( List.fold_left ~init:addr (List.rev path) ~f:(fun addr path -> - let addr = Location.Addr.parent_exn addr in + ( List.fold_left ~init:addr path ~f:(fun addr path -> let sibling_addr = Location.Addr.sibling addr in + let addr = Location.Addr.parent_exn addr in let hash = match path with `Left hash | `Right hash -> hash in self_set_hash t sibling_addr hash ; addr ) From ea94c34f44666e882013b4e137740f9517972a57 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 21 Nov 2023 18:17:15 +0000 Subject: [PATCH 049/119] Preload accounts for transactions --- src/lib/staged_ledger/staged_ledger.ml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/lib/staged_ledger/staged_ledger.ml b/src/lib/staged_ledger/staged_ledger.ml index d26739b78cdd..a714941e2b20 100644 --- a/src/lib/staged_ledger/staged_ledger.ml +++ 
b/src/lib/staged_ledger/staged_ledger.ml @@ -1019,6 +1019,14 @@ module T = struct let new_mask = Ledger.Mask.create ~depth:(Ledger.depth t.ledger) () in let new_ledger = Ledger.register_mask t.ledger new_mask in let transactions, works, commands_count, coinbases = pre_diff_info in + let accounts_accessed = + List.fold_left ~init:Account_id.Set.empty transactions ~f:(fun set txn -> + Account_id.Set.( + union set + (of_list (Transaction.accounts_referenced txn.With_status.data))) ) + |> Set.to_list + in + Ledger.unsafe_preload_accounts_from_parent new_ledger accounts_accessed ; [%log internal] "Update_coinbase_stack" ~metadata: [ ("transactions", `Int (List.length transactions)) From 683f7081e1ddfc6f3e5d6c706471dec770ea7319 Mon Sep 17 00:00:00 2001 From: georgeee Date: Mon, 27 Nov 2023 18:58:56 +0100 Subject: [PATCH 050/119] Fixup: propagate unsafe_preload_accounts_from_parent --- src/lib/merkle_mask/maskable_merkle_tree.ml | 3 +++ src/lib/merkle_mask/maskable_merkle_tree_intf.ml | 3 +++ src/lib/merkle_mask/masking_merkle_tree_intf.ml | 2 ++ src/lib/mina_ledger/ledger.ml | 3 +++ src/lib/mina_ledger/ledger.mli | 3 +++ 5 files changed, 14 insertions(+) diff --git a/src/lib/merkle_mask/maskable_merkle_tree.ml b/src/lib/merkle_mask/maskable_merkle_tree.ml index a251d6d67ba8..45235a1970af 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree.ml +++ b/src/lib/merkle_mask/maskable_merkle_tree.ml @@ -141,6 +141,9 @@ module Make (Inputs : Inputs_intf) = struct Node (summary, List.map masks ~f:(_crawl (module Mask.Attached))) end + let unsafe_preload_accounts_from_parent = + Mask.Attached.unsafe_preload_accounts_from_parent + let register_mask t mask = let attached_mask = Mask.set_parent mask t in List.iter (Uuid.Table.data registered_masks) ~f:(fun ms -> diff --git a/src/lib/merkle_mask/maskable_merkle_tree_intf.ml b/src/lib/merkle_mask/maskable_merkle_tree_intf.ml index b73974bbd5a9..aa447ab9d1cd 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree_intf.ml +++ 
b/src/lib/merkle_mask/maskable_merkle_tree_intf.ml @@ -15,6 +15,9 @@ module type S = sig val register_mask : t -> unattached_mask -> attached_mask + val unsafe_preload_accounts_from_parent : + attached_mask -> account_id list -> unit + (** raises an exception if mask is not registered *) val unregister_mask_exn : ?grandchildren: diff --git a/src/lib/merkle_mask/masking_merkle_tree_intf.ml b/src/lib/merkle_mask/masking_merkle_tree_intf.ml index e5ca27b99586..85da9cc5d9f9 100644 --- a/src/lib/merkle_mask/masking_merkle_tree_intf.ml +++ b/src/lib/merkle_mask/masking_merkle_tree_intf.ml @@ -76,6 +76,8 @@ module type S = sig (* makes new mask instance with copied tables, re-use parent *) val copy : t -> t + val unsafe_preload_accounts_from_parent : t -> account_id list -> unit + (** already have module For_testing from include above *) module For_testing : sig val location_in_mask : t -> location -> bool diff --git a/src/lib/mina_ledger/ledger.ml b/src/lib/mina_ledger/ledger.ml index 7e00c45d6c88..87fb51b7ae1b 100644 --- a/src/lib/mina_ledger/ledger.ml +++ b/src/lib/mina_ledger/ledger.ml @@ -271,6 +271,9 @@ module Ledger_inner = struct let register_mask t mask = Maskable.register_mask (packed t) mask + let unsafe_preload_accounts_from_parent = + Maskable.unsafe_preload_accounts_from_parent + let unregister_mask_exn ~loc mask = Maskable.unregister_mask_exn ~loc mask let remove_and_reparent_exn t t_as_mask = diff --git a/src/lib/mina_ledger/ledger.mli b/src/lib/mina_ledger/ledger.mli index 6324071567e5..e38703f548be 100644 --- a/src/lib/mina_ledger/ledger.mli +++ b/src/lib/mina_ledger/ledger.mli @@ -80,6 +80,9 @@ include *) val unregister_mask_exn : loc:string -> Mask.Attached.t -> Mask.t +val unsafe_preload_accounts_from_parent : + Mask.Attached.t -> Account_id.t list -> unit + (* The maskable ledger is t = Mask.Attached.t because register/unregister * work off of this type *) type maskable_ledger = t From dcd469e052c7aa152917d9a3d53c047fe5ba7abb Mon Sep 17 00:00:00 2001 
From: georgeee Date: Tue, 28 Nov 2023 18:41:11 +0100 Subject: [PATCH 051/119] Add comment for unsafe_preload_accounts_from_parent --- src/lib/merkle_mask/masking_merkle_tree.ml | 7 +++++++ src/lib/merkle_mask/masking_merkle_tree_intf.ml | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 028bf0715c12..519309dca621 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -624,6 +624,13 @@ module Make (Inputs : Inputs_intf.S) = struct self_find_or_batch_lookup self_find_location Base.location_of_account_batch + (* Adds specified accounts to the mask by loading them from parent ledger. + + Could be useful for transaction processing to pre-populate the mask with the + accounts used in processing a transaction (or a block) to ensure they are not loaded + from parent on each lookup. I.e. these accounts will be cached in mask and accessing + them during processing of a transaction won't use disk I/O. + *) let unsafe_preload_accounts_from_parent t account_ids = assert_is_attached t ; let locations = diff --git a/src/lib/merkle_mask/masking_merkle_tree_intf.ml b/src/lib/merkle_mask/masking_merkle_tree_intf.ml index 85da9cc5d9f9..6b366cb9d571 100644 --- a/src/lib/merkle_mask/masking_merkle_tree_intf.ml +++ b/src/lib/merkle_mask/masking_merkle_tree_intf.ml @@ -76,6 +76,13 @@ module type S = sig (* makes new mask instance with copied tables, re-use parent *) val copy : t -> t + (* Adds specified accounts to the mask by loading them from parent ledger. + + Could be useful for transaction processing to pre-populate the mask with the + accounts used in processing a transaction (or a block) to ensure they are not loaded + from parent on each lookup. I.e. these accounts will be cached in mask and accessing + them during processing of a transaction won't use disk I/O. 
+ *) val unsafe_preload_accounts_from_parent : t -> account_id list -> unit (** already have module For_testing from include above *) From 273ff38f5d4a7055cc315860682c3a92402630e9 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 20 Nov 2023 22:39:54 +0000 Subject: [PATCH 052/119] Add get_hash_batch_exn to ledgers --- src/lib/merkle_ledger/any_ledger.ml | 2 ++ src/lib/merkle_ledger/base_ledger_intf.ml | 2 ++ src/lib/merkle_ledger/database.ml | 6 ++-- src/lib/merkle_ledger/null_ledger.ml | 5 ++++ src/lib/merkle_mask/masking_merkle_tree.ml | 33 ++++++++++++++++------ 5 files changed, 36 insertions(+), 12 deletions(-) diff --git a/src/lib/merkle_ledger/any_ledger.ml b/src/lib/merkle_ledger/any_ledger.ml index 656cf6a2f8de..30f4da1c9960 100644 --- a/src/lib/merkle_ledger/any_ledger.ml +++ b/src/lib/merkle_ledger/any_ledger.ml @@ -124,6 +124,8 @@ module Make_base (Inputs : Inputs_intf) : let merkle_root (T ((module Base), t)) = Base.merkle_root t + let get_hash_batch_exn (T ((module Base), t)) = Base.get_hash_batch_exn t + let index_of_account_exn (T ((module Base), t)) = Base.index_of_account_exn t diff --git a/src/lib/merkle_ledger/base_ledger_intf.ml b/src/lib/merkle_ledger/base_ledger_intf.ml index 64f4a9348265..c74408be04f8 100644 --- a/src/lib/merkle_ledger/base_ledger_intf.ml +++ b/src/lib/merkle_ledger/base_ledger_intf.ml @@ -136,6 +136,8 @@ module type S = sig val merkle_path_batch : t -> Location.t list -> Path.t list + val get_hash_batch_exn : t -> Location.t list -> hash list + val remove_accounts_exn : t -> account_id list -> unit (** Triggers when the ledger has been detached and should no longer be diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 691af913143f..3f5689c47713 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -154,7 +154,7 @@ module Make (Inputs : Inputs_intf) : | None -> empty_hash (Location.height ~ledger_depth:mdb.depth location) - let get_hash_batch mdb 
locations = + let get_hash_batch_exn mdb locations = List.iter locations ~f:(fun location -> assert (Location.is_hash location)) ; let hashes = get_bin_batch mdb locations Hash.bin_read_t in List.map2_exn locations hashes ~f:(fun location hash -> @@ -696,7 +696,7 @@ module Make (Inputs : Inputs_intf) : let dependency_locs, dependency_dirs = List.unzip (Location.merkle_path_dependencies_exn location) in - let dependency_hashes = get_hash_batch mdb dependency_locs in + let dependency_hashes = get_hash_batch_exn mdb dependency_locs in List.map2_exn dependency_dirs dependency_hashes ~f:(fun dir hash -> Direction.map dir ~left:(`Left hash) ~right:(`Right hash) ) @@ -731,7 +731,7 @@ module Make (Inputs : Inputs_intf) : in loop locations [] [] [ 0 ] in - let rev_hashes = get_hash_batch mdb rev_locations in + let rev_hashes = get_hash_batch_exn mdb rev_locations in let rec loop directions hashes lengths acc = match (directions, hashes, lengths, acc) with | [], [], [], _ (* actually [] *) :: acc_tl -> diff --git a/src/lib/merkle_ledger/null_ledger.ml b/src/lib/merkle_ledger/null_ledger.ml index 2d67ffdfa359..c756ee03a6a5 100644 --- a/src/lib/merkle_ledger/null_ledger.ml +++ b/src/lib/merkle_ledger/null_ledger.ml @@ -73,6 +73,11 @@ end = struct let merkle_path_at_index_exn t index = merkle_path_at_addr_exn t (Addr.of_int_exn ~ledger_depth:t.depth index) + let get_hash_batch_exn t locations = + List.map locations ~f:(fun location -> + empty_hash_at_height + (Addr.height ~ledger_depth:t.depth (Location.to_path_exn location)) ) + let index_of_account_exn _t = failwith "index_of_account_exn: null ledgers are empty" diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 519309dca621..0fdb95869fe2 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -448,16 +448,31 @@ module Make (Inputs : Inputs_intf.S) = struct assert_is_attached t ; List.map locations ~f:(fun location -> 
get t location) - (* NB: rocksdb does not support batch reads; is this needed? *) - let get_hash_batch_exn t addrs = + let get_hash_batch_exn t locations = assert_is_attached t ; - List.map addrs ~f:(fun addr -> - match self_find_hash t addr with - | Some account -> - Some account - | None -> ( - try Some (Base.get_inner_hash_at_addr_exn (get_parent t) addr) - with _ -> None ) ) + let self_hashes_rev = + List.rev_map locations ~f:(fun location -> + (location, self_find_hash t (Location.to_path_exn location)) ) + in + let parent_locations_rev = + List.filter_map self_hashes_rev ~f:(fun (location, hash) -> + match hash with None -> Some location | Some _ -> None ) + in + let parent_hashes_rev = + if List.is_empty parent_locations_rev then [] + else Base.get_hash_batch_exn (get_parent t) parent_locations_rev + in + let rec recombine self_hashes_rev parent_hashes_rev acc = + match (self_hashes_rev, parent_hashes_rev) with + | [], [] -> + acc + | (_location, None) :: self_hashes_rev, hash :: parent_hashes_rev + | (_location, Some hash) :: self_hashes_rev, parent_hashes_rev -> + recombine self_hashes_rev parent_hashes_rev (hash :: acc) + | _, [] | [], _ -> + assert false + in + recombine self_hashes_rev parent_hashes_rev [] (* transfer state from mask to parent; flush local state *) let commit t = From 5e71099f9867b15737c2e70570ac0fd9341fd403 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 20 Nov 2023 22:57:17 +0000 Subject: [PATCH 053/119] Preload all hases for the merkle path and the self hashes in masks --- src/lib/merkle_mask/masking_merkle_tree.ml | 64 +++++++++++++--------- 1 file changed, 38 insertions(+), 26 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 0fdb95869fe2..a5c882730091 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -381,16 +381,20 @@ module Make (Inputs : Inputs_intf.S) = struct List.iter addresses_and_hashes ~f:(fun 
(addr, hash) -> self_set_hash t addr hash ) - (* a write writes only to the mask, parent is not involved need to update - both account and hash pieces of the mask *) - let set t location account = + let set_account_unsafe t location account = assert_is_attached t ; self_set_account t location account ; (* Update token info. *) let account_id = Account.identifier account in Token_id.Table.set t.token_owners ~key:(Account_id.derive_token_id ~owner:account_id) - ~data:account_id ; + ~data:account_id + + (* a write writes only to the mask, parent is not involved need to update + both account and hash pieces of the mask *) + let set t location account = + assert_is_attached t ; + set_account_unsafe t location account ; (* Update merkle path. *) let account_address = Location.to_path_exn location in let account_hash = Hash.hash_account account in @@ -402,17 +406,6 @@ module Make (Inputs : Inputs_intf.S) = struct List.iter addresses_and_hashes ~f:(fun (addr, hash) -> self_set_hash t addr hash ) - let set_merkle_path_unsafe t addr path = - assert_is_attached t ; - ignore - ( List.fold_left ~init:addr path ~f:(fun addr path -> - let sibling_addr = Location.Addr.sibling addr in - let addr = Location.Addr.parent_exn addr in - let hash = match path with `Left hash | `Right hash -> hash in - self_set_hash t sibling_addr hash ; - addr ) - : Location.Addr.t ) - (* if the mask's parent sets an account, we can prune an entry in the mask if the account in the parent is the same in the mask *) let parent_set_notify t account = @@ -655,23 +648,42 @@ module Make (Inputs : Inputs_intf.S) = struct List.filter_map locations ~f:(fun (_account_id, location) -> location) in let accounts = Base.get_batch (get_parent t) non_empty_locations in - let merkle_paths = - Base.merkle_path_batch (get_parent t) non_empty_locations + let all_hash_locations = + let rec generate_locations account_locations acc = + match account_locations with + | [] -> + acc + | location :: account_locations -> ( + let 
address = Location.to_path_exn location in + match Addr.parent address with + | Ok parent -> + let sibling = Addr.sibling address in + generate_locations + (Location.Hash parent :: account_locations) + (Location.Hash address :: Location.Hash sibling :: acc) + | Error _ -> + (* This is the root. It's somewhat wasteful to add it for + every account, but makes this logic simpler. + *) + generate_locations account_locations + (Location.Hash address :: acc) ) + in + generate_locations non_empty_locations [] in - (* TODO: If we also insert the empty merkle paths corresponding that may - be used by the unmatched account IDs, we can avoid any further disk IO - when accessng this mask. - *) - List.iter2_exn non_empty_locations merkle_paths - ~f:(fun location merkle_path -> - let addr = Location.to_path_exn location in - set_merkle_path_unsafe t addr merkle_path ) ; + let all_hashes = + Base.get_hash_batch_exn (get_parent t) all_hash_locations + in + (* Batch import merkle paths and self hashes. *) + List.iter2_exn all_hash_locations all_hashes ~f:(fun location hash -> + let address = Location.to_path_exn location in + self_set_hash t address hash ) ; + (* Batch import accounts. 
*) List.iter accounts ~f:(fun (location, account) -> match account with | None -> () | Some account -> - set t location account ) + set_account_unsafe t location account ) (* not needed for in-memory mask; in the database, it's currently a NOP *) let make_space_for t = From 499fc6f33f7b577c80c098ea076052f8b2781ab6 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 20 Nov 2023 19:54:07 +0000 Subject: [PATCH 054/119] Allow fetching empty merkle paths from masks --- src/lib/merkle_address/merkle_address.ml | 9 +++++++++ src/lib/merkle_address/merkle_address.mli | 4 ++++ src/lib/merkle_mask/dune | 1 + src/lib/merkle_mask/masking_merkle_tree.ml | 19 +++++++++++++++++-- 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/src/lib/merkle_address/merkle_address.ml b/src/lib/merkle_address/merkle_address.ml index f36dd5b2af09..641c37bdfe0d 100644 --- a/src/lib/merkle_address/merkle_address.ml +++ b/src/lib/merkle_address/merkle_address.ml @@ -215,6 +215,15 @@ let serialize ~ledger_depth path = let is_parent_of parent ~maybe_child = Bitstring.is_prefix maybe_child parent +let same_height_ancestors x y = + let depth_x = depth x in + let depth_y = depth y in + if depth_x < depth_y then (x, slice y 0 depth_x) else (slice x 0 depth_y, y) + +let is_further_right ~than path = + let than, path = same_height_ancestors than path in + compare than path < 0 + module Range = struct type nonrec t = t * t diff --git a/src/lib/merkle_address/merkle_address.mli b/src/lib/merkle_address/merkle_address.mli index 767fc0049617..98d21ad95cf8 100644 --- a/src/lib/merkle_address/merkle_address.mli +++ b/src/lib/merkle_address/merkle_address.mli @@ -74,3 +74,7 @@ val height : ledger_depth:int -> t -> int val to_int : t -> int val of_int_exn : ledger_depth:int -> int -> t + +val same_height_ancestors : t -> t -> t * t + +val is_further_right : than:t -> t -> bool diff --git a/src/lib/merkle_mask/dune b/src/lib/merkle_mask/dune index 1c9070d0db27..02538f4c1322 100644 --- 
a/src/lib/merkle_mask/dune +++ b/src/lib/merkle_mask/dune @@ -23,6 +23,7 @@ visualization mina_stdlib direction + empty_hashes ) (preprocess (pps diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index a5c882730091..a953a4d08962 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -211,10 +211,14 @@ module Make (Inputs : Inputs_intf.S) = struct let get_batch = self_find_or_batch_lookup self_find_account Base.get_batch + let empty_hash = + Empty_hashes.extensible_cache (module Hash) ~init_hash:Hash.empty_account + let self_merkle_path t address = Option.try_with (fun () -> let rec self_merkle_path address = - if Addr.height ~ledger_depth:t.depth address >= t.depth then [] + let height = Addr.height ~ledger_depth:t.depth address in + if height >= t.depth then [] else let sibling = Addr.sibling address in let sibling_dir = Location.last_direction address in @@ -223,7 +227,18 @@ module Make (Inputs : Inputs_intf.S) = struct | Some hash -> hash | None -> - (* Caught by [try_with] above. *) assert false + let is_empty = + match t.current_location with + | None -> + true + | Some current_location -> + let current_address = + Location.to_path_exn current_location + in + Addr.is_further_right ~than:current_address sibling + in + if is_empty then empty_hash height + else (* Caught by [try_with] above. 
*) assert false in let parent_address = match Addr.parent address with From e8894752bfffc63dbeadd487e17465e1d4436487 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 20 Nov 2023 19:57:27 +0000 Subject: [PATCH 055/119] Handle known-empty accounts in merkle masks without hitting the db --- src/lib/merkle_mask/masking_merkle_tree.ml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index a953a4d08962..0d7dd505f86c 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -181,7 +181,16 @@ module Make (Inputs : Inputs_intf.S) = struct | Some account -> Some account | None -> - Base.get (get_parent t) location + let is_empty = + match t.current_location with + | None -> + true + | Some current_location -> + let address = Location.to_path_exn location in + let current_address = Location.to_path_exn current_location in + Addr.is_further_right ~than:current_address address + in + if is_empty then None else Base.get (get_parent t) location let self_find_or_batch_lookup self_find lookup_parent t ids = assert_is_attached t ; From cc770e5a10a6a820c58a06d6f23aa6ad093ab3ed Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 20 Nov 2023 20:19:29 +0000 Subject: [PATCH 056/119] Handle known-empty accounts in get_batch for merkle masks --- src/lib/merkle_mask/masking_merkle_tree.ml | 55 ++++++++++++++-------- 1 file changed, 36 insertions(+), 19 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 0d7dd505f86c..3236313c3b75 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -194,9 +194,7 @@ module Make (Inputs : Inputs_intf.S) = struct let self_find_or_batch_lookup self_find lookup_parent t ids = assert_is_attached t ; - let self_found_or_none = - List.map ids ~f:(fun id -> (id, self_find t id)) - 
in + let self_found_or_none = List.map ids ~f:self_find in let not_found = List.filter_map self_found_or_none ~f:(function | id, None -> @@ -205,20 +203,37 @@ module Make (Inputs : Inputs_intf.S) = struct None ) in let from_parent = lookup_parent (get_parent t) not_found in - let _, res = - List.fold_map self_found_or_none ~init:from_parent - ~f:(fun from_parent (id, self_found) -> - match (self_found, from_parent) with - | None, r :: rest -> - (rest, r) - | Some _, _ -> - (from_parent, (id, self_found)) - | _ -> - failwith "unexpected number of results from DB" ) + List.fold_map self_found_or_none ~init:from_parent + ~f:(fun from_parent (id, self_found) -> + match (self_found, from_parent) with + | None, r :: rest -> + (rest, r) + | Some acc_found_locally, _ -> + (from_parent, (id, acc_found_locally)) + | _ -> + failwith "unexpected number of results from DB" ) + |> snd + + let get_batch t = + let self_find id = + let res = self_find_account t id in + let res = + if Option.is_none res then + let is_empty = + match t.current_location with + | None -> + true + | Some current_location -> + let address = Location.to_path_exn id in + let current_address = Location.to_path_exn current_location in + Addr.is_further_right ~than:current_address address + in + Option.some_if is_empty None + else Some res + in + (id, res) in - res - - let get_batch = self_find_or_batch_lookup self_find_account Base.get_batch + self_find_or_batch_lookup self_find Base.get_batch t let empty_hash = Empty_hashes.extensible_cache (module Hash) ~init_hash:Hash.empty_account @@ -652,9 +667,11 @@ module Make (Inputs : Inputs_intf.S) = struct | None -> Base.location_of_account (get_parent t) account_id - let location_of_account_batch = - self_find_or_batch_lookup self_find_location - Base.location_of_account_batch + let location_of_account_batch t = + (* TODO consider handling special case of empty addresses *) + self_find_or_batch_lookup + (fun id -> (id, Option.map ~f:Option.some @@ 
self_find_location t id)) + Base.location_of_account_batch t (* Adds specified accounts to the mask by laoding them from parent ledger. From 8c66c3eecad8c3a42acd31a94b947cd03456e4dc Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 21 Nov 2023 03:49:11 +0000 Subject: [PATCH 057/119] Remove invalid test: mask parents should not be changed --- src/lib/merkle_ledger_tests/test_mask.ml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/lib/merkle_ledger_tests/test_mask.ml b/src/lib/merkle_ledger_tests/test_mask.ml index feed94a7a20e..ce361f9d5266 100644 --- a/src/lib/merkle_ledger_tests/test_mask.ml +++ b/src/lib/merkle_ledger_tests/test_mask.ml @@ -188,17 +188,6 @@ module Make (Test : Test_intf) = struct (* verify all hashes to root are same in mask and parent *) compare_maskable_mask_hashes maskable attached_mask dummy_address ) - let%test "mask delegates to parent" = - Test.with_instances (fun maskable mask -> - let attached_mask = Maskable.register_mask maskable mask in - (* set to parent, get from mask *) - Maskable.set maskable dummy_location dummy_account ; - let mask_result = Mask.Attached.get attached_mask dummy_location in - Option.is_some mask_result - && - let mask_account = Option.value_exn mask_result in - Account.equal dummy_account mask_account ) - let%test "mask prune after parent notification" = Test.with_instances (fun maskable mask -> let attached_mask = Maskable.register_mask maskable mask in From df631b4de94295aa72fe4e839bf2d8b8ddec1073 Mon Sep 17 00:00:00 2001 From: georgeee Date: Tue, 28 Nov 2023 19:12:49 +0100 Subject: [PATCH 058/119] Remove a TODO in location_of_account_batch Justification: we don't have a choice in this function because unlike loading an account by location here we can't tell of an account whether it's a new one or just wasn't loaded from parent. It might have an implication to performance of transaction processing (I do see some db lookups despite the fact that we preload a lot of stuff). 
But I think we'll handle it via special-casing unsafe_preload function, not right here. --- src/lib/merkle_mask/masking_merkle_tree.ml | 1 - 1 file changed, 1 deletion(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 3236313c3b75..caae338cd051 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -668,7 +668,6 @@ module Make (Inputs : Inputs_intf.S) = struct Base.location_of_account (get_parent t) account_id let location_of_account_batch t = - (* TODO consider handling special case of empty addresses *) self_find_or_batch_lookup (fun id -> (id, Option.map ~f:Option.some @@ self_find_location t id)) Base.location_of_account_batch t From a8b5b8e1a38d949727d9567c6af4903dbb0eb829 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 21 Nov 2023 00:00:56 +0000 Subject: [PATCH 059/119] Avoid ledger copy and mutation in Sparse_ledger.of_ledger_subset_exn --- src/lib/mina_ledger/dune | 1 + src/lib/mina_ledger/sparse_ledger.ml | 70 ++++++++++++++++++++++------ 2 files changed, 58 insertions(+), 13 deletions(-) diff --git a/src/lib/mina_ledger/dune b/src/lib/mina_ledger/dune index cb4afb039755..cb8a7dbaa9ca 100644 --- a/src/lib/mina_ledger/dune +++ b/src/lib/mina_ledger/dune @@ -53,4 +53,5 @@ unsigned_extended with_hash ppx_version.runtime + direction )) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 10e1f5a1e783..1be2561a5f77 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -7,15 +7,59 @@ let of_ledger_root ledger = of_root ~depth:(Ledger.depth ledger) (Ledger.merkle_root ledger) let of_ledger_subset_exn (oledger : Ledger.t) keys = - let ledger = Ledger.copy oledger in - let locations = Ledger.location_of_account_batch ledger keys in - let non_empty_locations = List.filter_map ~f:snd locations in - let accounts = Ledger.get_batch ledger non_empty_locations in - let 
merkle_paths = Ledger.merkle_path_batch ledger non_empty_locations in - let sl, _, _ = + let locations = Ledger.location_of_account_batch oledger keys in + let num_new_accounts, non_empty_locations = + let num_new_accounts = ref 0 in + let non_empty_locations = + List.filter_map locations ~f:(fun (_key, location) -> + if Option.is_none location then incr num_new_accounts ; + location ) + in + (!num_new_accounts, non_empty_locations) + in + let accounts = Ledger.get_batch oledger non_empty_locations in + let merkle_paths, empty_merkle_paths = + let next_location_exn loc = Option.value_exn (Ledger.Location.next loc) in + let empty_address depth = + Ledger.Addr.of_directions @@ List.init depth ~f:(fun _ -> Direction.Left) + in + let merkle_path_locations = + (let rec add_locations remaining last_filled merkle_path_locations = + if remaining > 0 then + let new_location = + match last_filled with + | None -> + Ledger.Location.Account (empty_address (Ledger.depth oledger)) + | Some last_filled -> + next_location_exn last_filled + in + add_locations (remaining - 1) (Some new_location) + (new_location :: merkle_path_locations) + else merkle_path_locations + in + add_locations ) + num_new_accounts + (Ledger.last_filled oledger) + non_empty_locations + in + let merkle_paths = Ledger.merkle_path_batch oledger merkle_path_locations in + (let rec pop_empties num_empties merkle_paths locations acc = + if num_empties <= 0 then (merkle_paths, acc) + else + match (merkle_paths, locations) with + | path :: merkle_paths, location :: locations -> + pop_empties (num_empties - 1) merkle_paths locations + ((location, path) :: acc) + | [], _ | _, [] -> + assert false + in + pop_empties ) + num_new_accounts merkle_paths merkle_path_locations [] + in + let sl, _, _, _ = List.fold locations - ~init:(of_ledger_root ledger, accounts, merkle_paths) - ~f:(fun (sl, accounts, merkle_paths) (key, location) -> + ~init:(of_ledger_root oledger, accounts, merkle_paths, empty_merkle_paths) + ~f:(fun 
(sl, accounts, merkle_paths, empty_merkle_paths) (key, location) -> match location with | Some _loc -> ( match (accounts, merkle_paths) with @@ -23,17 +67,17 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = let sl = add_path sl merkle_path key (Option.value_exn account) in - (sl, rest, rest_merkle_paths) + (sl, rest, rest_merkle_paths, empty_merkle_paths) | _ -> failwith "unexpected number of non empty accounts" ) | None -> - let path, account = Ledger.create_empty_exn ledger key in - let sl = add_path sl path key account in - (sl, accounts, merkle_paths) ) + let _, path = List.hd_exn empty_merkle_paths in + let sl = add_path sl path key Account.empty in + (sl, accounts, merkle_paths, List.tl_exn empty_merkle_paths) ) in Debug_assert.debug_assert (fun () -> [%test_eq: Ledger_hash.t] - (Ledger.merkle_root ledger) + (Ledger.merkle_root oledger) ((merkle_root sl :> Random_oracle.Digest.t) |> Ledger_hash.of_hash) ) ; sl From 40060b734337377c76b73ea5048a779be530f5b6 Mon Sep 17 00:00:00 2001 From: Tang Jiawei Date: Wed, 29 Nov 2023 21:30:44 +0800 Subject: [PATCH 060/119] refactor the code a little bit --- src/lib/mina_ledger/sparse_ledger.ml | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 1be2561a5f77..0bdac850dfee 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -18,7 +18,7 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = (!num_new_accounts, non_empty_locations) in let accounts = Ledger.get_batch oledger non_empty_locations in - let merkle_paths, empty_merkle_paths = + let empty_merkle_paths, merkle_paths = let next_location_exn loc = Option.value_exn (Ledger.Location.next loc) in let empty_address depth = Ledger.Addr.of_directions @@ List.init depth ~f:(fun _ -> Direction.Left) @@ -43,18 +43,7 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = non_empty_locations in let merkle_paths = 
Ledger.merkle_path_batch oledger merkle_path_locations in - (let rec pop_empties num_empties merkle_paths locations acc = - if num_empties <= 0 then (merkle_paths, acc) - else - match (merkle_paths, locations) with - | path :: merkle_paths, location :: locations -> - pop_empties (num_empties - 1) merkle_paths locations - ((location, path) :: acc) - | [], _ | _, [] -> - assert false - in - pop_empties ) - num_new_accounts merkle_paths merkle_path_locations [] + List.split_n merkle_paths num_new_accounts in let sl, _, _, _ = List.fold locations @@ -71,7 +60,7 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = | _ -> failwith "unexpected number of non empty accounts" ) | None -> - let _, path = List.hd_exn empty_merkle_paths in + let path = List.hd_exn empty_merkle_paths in let sl = add_path sl path key Account.empty in (sl, accounts, merkle_paths, List.tl_exn empty_merkle_paths) ) in From 4ae3174ea4a18a0b9f1b09ba71d7dc5446a9a090 Mon Sep 17 00:00:00 2001 From: Tang Jiawei Date: Wed, 29 Nov 2023 21:58:22 +0800 Subject: [PATCH 061/119] further clean up the code --- src/lib/mina_ledger/sparse_ledger.ml | 36 +++++++++++++--------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 0bdac850dfee..1db5f21c79ff 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -23,26 +23,24 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = let empty_address depth = Ledger.Addr.of_directions @@ List.init depth ~f:(fun _ -> Direction.Left) in - let merkle_path_locations = - (let rec add_locations remaining last_filled merkle_path_locations = - if remaining > 0 then - let new_location = - match last_filled with - | None -> - Ledger.Location.Account (empty_address (Ledger.depth oledger)) - | Some last_filled -> - next_location_exn last_filled - in - add_locations (remaining - 1) (Some new_location) - (new_location :: merkle_path_locations) - 
else merkle_path_locations - in - add_locations ) - num_new_accounts - (Ledger.last_filled oledger) - non_empty_locations + let empty_locations = + let rec add_locations remaining last_filled acc = + if remaining > 0 then + let new_location = + match last_filled with + | None -> + Ledger.Location.Account (empty_address (Ledger.depth oledger)) + | Some last_filled -> + next_location_exn last_filled + in + add_locations (remaining - 1) (Some new_location) (new_location :: acc) + else List.rev acc + in + add_locations num_new_accounts (Ledger.last_filled oledger) [] + in + let merkle_paths = + Ledger.merkle_path_batch oledger (empty_locations @ non_empty_locations) in - let merkle_paths = Ledger.merkle_path_batch oledger merkle_path_locations in List.split_n merkle_paths num_new_accounts in let sl, _, _, _ = From 717ff96724aa6cd6453a04b147826bbdc451d16d Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 29 Nov 2023 16:24:04 +0100 Subject: [PATCH 062/119] Small refactoring in of_ledger_subset_exn --- src/lib/mina_ledger/sparse_ledger.ml | 41 ++++++++++++---------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 1db5f21c79ff..9b2744628b70 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -8,35 +8,30 @@ let of_ledger_root ledger = let of_ledger_subset_exn (oledger : Ledger.t) keys = let locations = Ledger.location_of_account_batch oledger keys in - let num_new_accounts, non_empty_locations = - let num_new_accounts = ref 0 in - let non_empty_locations = - List.filter_map locations ~f:(fun (_key, location) -> - if Option.is_none location then incr num_new_accounts ; - location ) - in - (!num_new_accounts, non_empty_locations) + let non_empty_locations = List.filter_map locations ~f:snd in + let num_new_accounts = + List.length locations - List.length non_empty_locations in let accounts = Ledger.get_batch oledger 
non_empty_locations in let empty_merkle_paths, merkle_paths = let next_location_exn loc = Option.value_exn (Ledger.Location.next loc) in - let empty_address depth = - Ledger.Addr.of_directions @@ List.init depth ~f:(fun _ -> Direction.Left) + let empty_address = + Ledger.Addr.of_directions + @@ List.init (Ledger.depth oledger) ~f:(Fn.const Direction.Left) in let empty_locations = - let rec add_locations remaining last_filled acc = - if remaining > 0 then - let new_location = - match last_filled with - | None -> - Ledger.Location.Account (empty_address (Ledger.depth oledger)) - | Some last_filled -> - next_location_exn last_filled - in - add_locations (remaining - 1) (Some new_location) (new_location :: acc) - else List.rev acc - in - add_locations num_new_accounts (Ledger.last_filled oledger) [] + if num_new_accounts = 0 then [] + else + let first_loc = + Option.value_map ~f:next_location_exn + ~default:(Ledger.Location.Account empty_address) + (Ledger.last_filled oledger) + in + let loc = ref first_loc in + first_loc + :: List.init (num_new_accounts - 1) ~f:(fun _ -> + loc := next_location_exn !loc ; + !loc ) in let merkle_paths = Ledger.merkle_path_batch oledger (empty_locations @ non_empty_locations) From c1cefea2129f301c7ec229d07ceac178bb97b194 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 20:26:05 +0100 Subject: [PATCH 063/119] Porting CI fixes from develop and berkeley --- buildkite/scripts/run-snark-transaction-profiler.sh | 8 +------- buildkite/scripts/version-linter.sh | 2 +- buildkite/src/Jobs/Test/DelegationBackendUnitTest.dhall | 3 +++ 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/buildkite/scripts/run-snark-transaction-profiler.sh b/buildkite/scripts/run-snark-transaction-profiler.sh index 82cb16a3c93a..f7298d9c62d9 100755 --- a/buildkite/scripts/run-snark-transaction-profiler.sh +++ b/buildkite/scripts/run-snark-transaction-profiler.sh @@ -8,13 +8,7 @@ export DEBIAN_FRONTEND=noninteractive apt-get update apt-get 
install -y git apt-transport-https ca-certificates tzdata curl python3 -case "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" in - rampup|berkeley|release/2.0.0|develop) - TESTNET_NAME="berkeley" - ;; - *) - TESTNET_NAME="mainnet" -esac +TESTNET_NAME="berkeley" git config --global --add safe.directory /workdir diff --git a/buildkite/scripts/version-linter.sh b/buildkite/scripts/version-linter.sh index 603ad0bf10e0..14394da02d55 100755 --- a/buildkite/scripts/version-linter.sh +++ b/buildkite/scripts/version-linter.sh @@ -19,7 +19,7 @@ git config --global --add safe.directory /workdir source buildkite/scripts/handle-fork.sh source buildkite/scripts/export-git-env-vars.sh -pip3 install sexpdata +pip3 install sexpdata==1.0.0 base_branch=${REMOTE}/${BUILDKITE_PULL_REQUEST_BASE_BRANCH} pr_branch=origin/${BUILDKITE_BRANCH} diff --git a/buildkite/src/Jobs/Test/DelegationBackendUnitTest.dhall b/buildkite/src/Jobs/Test/DelegationBackendUnitTest.dhall index f0b8e42f78e8..0c820b798f4c 100644 --- a/buildkite/src/Jobs/Test/DelegationBackendUnitTest.dhall +++ b/buildkite/src/Jobs/Test/DelegationBackendUnitTest.dhall @@ -1,4 +1,6 @@ let ContainerImages = ../../Constants/ContainerImages.dhall +let B = ../../External/Buildkite.dhall +let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type let Cmd = ../../Lib/Cmds.dhall let S = ../../Lib/SelectFiles.dhall @@ -32,6 +34,7 @@ Pipeline.build ], label = "delegation backend unit-tests", key = "delegation-backend-unit-tests", + soft_fail = Some (B/SoftFail.Boolean True), target = Size.Small, docker = None Docker.Type } From 9c89cca1cdfcbbc487990366e194331f6dc2cb9f Mon Sep 17 00:00:00 2001 From: Deepthi S Kumar Date: Thu, 30 Nov 2023 20:29:46 -0800 Subject: [PATCH 064/119] fix the order in the location list --- src/lib/mina_ledger/sparse_ledger.ml | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 9b2744628b70..82193f76fe0f 
100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -6,6 +6,17 @@ module GS = Global_state let of_ledger_root ledger = of_root ~depth:(Ledger.depth ledger) (Ledger.merkle_root ledger) +(*** [iterate_n ~f init n] returns [[f init, f (f init), ..]] of size [n] *) +let iterate_n ~f = + let rec impl prev = function + | 0 -> + [] + | n -> + let r = f prev in + r :: impl r (n - 1) + in + impl + let of_ledger_subset_exn (oledger : Ledger.t) keys = let locations = Ledger.location_of_account_batch oledger keys in let non_empty_locations = List.filter_map locations ~f:snd in @@ -27,11 +38,8 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = ~default:(Ledger.Location.Account empty_address) (Ledger.last_filled oledger) in - let loc = ref first_loc in first_loc - :: List.init (num_new_accounts - 1) ~f:(fun _ -> - loc := next_location_exn !loc ; - !loc ) + :: iterate_n ~f:next_location_exn first_loc (num_new_accounts - 1) in let merkle_paths = Ledger.merkle_path_batch oledger (empty_locations @ non_empty_locations) From c86d1d596e2cd085a973b382cd9e0ca0f116925d Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 21 Nov 2023 14:42:30 +0000 Subject: [PATCH 065/119] Add Sparse_ledger.add_wide_path --- src/lib/sparse_ledger_lib/sparse_ledger.ml | 55 ++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/src/lib/sparse_ledger_lib/sparse_ledger.ml b/src/lib/sparse_ledger_lib/sparse_ledger.ml index 168d408b614c..fde43966ce3f 100644 --- a/src/lib/sparse_ledger_lib/sparse_ledger.ml +++ b/src/lib/sparse_ledger_lib/sparse_ledger.ml @@ -74,6 +74,13 @@ module type S = sig val add_path : t -> [ `Left of hash | `Right of hash ] list -> account_id -> account -> t + val add_wide_path_unsafe : + t + -> [ `Left of hash * hash | `Right of hash * hash ] list + -> account_id + -> account + -> t + val iteri : t -> f:(int -> account -> unit) -> unit val merkle_root : t -> hash @@ -173,6 +180,54 @@ end = struct ; indexes = (account_id, index) 
:: t.indexes } + let add_wide_path_unsafe depth0 tree0 path0 account = + let rec build_tree hash height p = + match p with + | `Left (h_l, h_r) :: path -> + let l = build_tree h_l (height - 1) path in + Tree.Node (hash, l, Hash h_r) + | `Right (h_l, h_r) :: path -> + let r = build_tree h_r (height - 1) path in + Node (hash, Hash h_l, r) + | [] -> + assert (height = -1) ; + Account account + in + let rec union height tree path = + match (tree, path) with + | Tree.Hash h, path -> + let t = build_tree h height path in + [%test_result: Hash.t] + ~message: + "Hashes in union are not equal, something is wrong with your \ + ledger" + ~expect:h (hash t) ; + t + | Node (h, l, r), `Left (_h_l, _h_r) :: path -> + let l = union (height - 1) l path in + Node (h, l, r) + | Node (h, l, r), `Right (_h_l, _h_r) :: path -> + let r = union (height - 1) r path in + Node (h, l, r) + | Node _, [] -> + failwith "Path too short" + | Account _, _ :: _ -> + failwith "Path too long" + | Account _, [] -> + tree + in + union (depth0 - 1) tree0 (List.rev path0) + + let add_wide_path_unsafe (t : t) path account_id account = + let index = + List.foldi path ~init:0 ~f:(fun i acc x -> + match x with `Right _ -> acc + (1 lsl i) | `Left _ -> acc ) + in + { t with + tree = add_wide_path_unsafe t.depth t.tree path account + ; indexes = (account_id, index) :: t.indexes + } + let iteri (t : t) ~f = let rec go acc i tree ~f = match tree with From 69b33f2e38ad3c464bc66df8aa3a1d0f39f1ced3 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 21 Nov 2023 17:24:34 +0000 Subject: [PATCH 066/119] Add add_wide_path_batch to Ledger_intf --- src/lib/merkle_ledger/any_ledger.ml | 3 + src/lib/merkle_ledger/base_ledger_intf.ml | 5 + src/lib/merkle_ledger/database.ml | 61 ++++++++++++ src/lib/merkle_ledger/null_ledger.ml | 23 +++++ src/lib/merkle_mask/masking_merkle_tree.ml | 108 +++++++++++++++++++++ src/lib/mina_base/sparse_ledger_base.ml | 1 + src/lib/mina_base/sparse_ledger_base.mli | 11 +++ 
src/lib/sparse_ledger_lib/sparse_ledger.ml | 4 + 8 files changed, 216 insertions(+) diff --git a/src/lib/merkle_ledger/any_ledger.ml b/src/lib/merkle_ledger/any_ledger.ml index 30f4da1c9960..0541f47acbd4 100644 --- a/src/lib/merkle_ledger/any_ledger.ml +++ b/src/lib/merkle_ledger/any_ledger.ml @@ -122,6 +122,9 @@ module Make_base (Inputs : Inputs_intf) : let merkle_path_batch (T ((module Base), t)) = Base.merkle_path_batch t + let wide_merkle_path_batch (T ((module Base), t)) = + Base.wide_merkle_path_batch t + let merkle_root (T ((module Base), t)) = Base.merkle_root t let get_hash_batch_exn (T ((module Base), t)) = Base.get_hash_batch_exn t diff --git a/src/lib/merkle_ledger/base_ledger_intf.ml b/src/lib/merkle_ledger/base_ledger_intf.ml index c74408be04f8..f85caa808b50 100644 --- a/src/lib/merkle_ledger/base_ledger_intf.ml +++ b/src/lib/merkle_ledger/base_ledger_intf.ml @@ -136,6 +136,11 @@ module type S = sig val merkle_path_batch : t -> Location.t list -> Path.t list + val wide_merkle_path_batch : + t + -> Location.t list + -> [ `Left of hash * hash | `Right of hash * hash ] list list + val get_hash_batch_exn : t -> Location.t list -> hash list val remove_accounts_exn : t -> account_id list -> unit diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 3f5689c47713..2ae871960425 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -752,6 +752,67 @@ module Make (Inputs : Inputs_intf) : in loop rev_directions rev_hashes rev_lengths [ [] ] + let wide_merkle_path_batch mdb locations = + let locations = + List.map locations ~f:(fun location -> + if Location.is_account location then + Location.Hash (Location.to_path_exn location) + else ( + assert (Location.is_hash location) ; + location ) ) + in + let rev_locations, rev_directions, rev_lengths = + let rec loop locations loc_acc dir_acc length_acc = + match (locations, length_acc) with + | [], _ :: length_acc -> + (loc_acc, dir_acc, length_acc) + 
| k :: locations, length :: length_acc -> + if Location.height ~ledger_depth:mdb.depth k >= mdb.depth then + loop locations loc_acc dir_acc (0 :: length :: length_acc) + else + let sibling = Location.sibling k in + let sibling_dir = + Location.last_direction (Location.to_path_exn k) + in + let loc_acc = + match sibling_dir with + | Direction.Left -> + sibling :: k :: loc_acc + | Direction.Right -> + k :: sibling :: loc_acc + in + loop + (Location.parent k :: locations) + loc_acc (sibling_dir :: dir_acc) + ((length + 1) :: length_acc) + | _ -> + assert false + in + loop locations [] [] [ 0 ] + in + let rev_hashes = get_hash_batch_exn mdb rev_locations in + let rec loop directions hashes lengths acc = + match (directions, hashes, lengths, acc) with + | [], [], [], _ (* actually [] *) :: acc_tl -> + acc_tl + | _, _, 0 :: lengths, _ -> + loop directions hashes lengths ([] :: acc) + | ( direction :: directions + , hash_l :: hash_r :: hashes + , length :: lengths + , acc_hd :: acc_tl ) -> + let dir = + Direction.map direction + ~left:(`Left (hash_l, hash_r)) + ~right:(`Right (hash_l, hash_r)) + in + loop directions hashes ((length - 1) :: lengths) + ((dir :: acc_hd) :: acc_tl) + | _ -> + failwith "Mismatched lengths" + in + loop rev_directions rev_hashes rev_lengths [ [] ] + let merkle_path_at_addr_exn t addr = merkle_path t (Location.Hash addr) let merkle_path_at_index_exn t index = diff --git a/src/lib/merkle_ledger/null_ledger.ml b/src/lib/merkle_ledger/null_ledger.ml index c756ee03a6a5..ac9dd31e6b43 100644 --- a/src/lib/merkle_ledger/null_ledger.ml +++ b/src/lib/merkle_ledger/null_ledger.ml @@ -66,6 +66,29 @@ end = struct let merkle_path_batch t locations = List.map ~f:(merkle_path t) locations + let wide_merkle_path t location = + let location = + if Location.is_account location then + Location.Hash (Location.to_path_exn location) + else location + in + assert (Location.is_hash location) ; + let rec loop k = + let h = Location.height ~ledger_depth:t.depth k in + if 
h >= t.depth then [] + else + let sibling_dir = Location.last_direction (Location.to_path_exn k) in + let hash = empty_hash_at_height h in + Direction.map sibling_dir + ~left:(`Left (hash, hash)) + ~right:(`Right (hash, hash)) + :: loop (Location.parent k) + in + loop location + + let wide_merkle_path_batch t locations = + List.map ~f:(wide_merkle_path t) locations + let merkle_root t = empty_hash_at_height t.depth let merkle_path_at_addr_exn t addr = merkle_path t (Location.Hash addr) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index caae338cd051..5c0845d43a6b 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -276,6 +276,48 @@ module Make (Inputs : Inputs_intf.S) = struct in self_merkle_path address ) + let self_wide_merkle_path t address = + Option.try_with (fun () -> + let rec self_wide_merkle_path address = + let height = Addr.height ~ledger_depth:t.depth address in + if height >= t.depth then [] + else + let sibling = Addr.sibling address in + let sibling_dir = Location.last_direction sibling in + let get_hash addr = + match self_find_hash t addr with + | Some hash -> + hash + | None -> + let is_empty = + match t.current_location with + | None -> + true + | Some current_location -> + let current_address = + Location.to_path_exn current_location + in + Addr.is_further_right ~than:current_address addr + in + if is_empty then empty_hash height + else (* Caught by [try_with] above. *) assert false + in + let sibling_hash = get_hash sibling in + let self_hash = get_hash address in + let parent_address = + match Addr.parent address with + | Ok addr -> + addr + | Error _ -> + (* Caught by [try_with] above. 
*) assert false + in + Direction.map sibling_dir + ~left:(`Right (sibling_hash, self_hash)) + ~right:(`Left (self_hash, sibling_hash)) + :: self_wide_merkle_path parent_address + in + self_wide_merkle_path address ) + (* fixup_merkle_path patches a Merkle path reported by the parent, overriding with hashes which are stored in the mask *) let fixup_merkle_path t path address = @@ -300,6 +342,33 @@ module Make (Inputs : Inputs_intf.S) = struct in build_fixed_path path address [] + (* fixup_merkle_path patches a Merkle path reported by the parent, + overriding with hashes which are stored in the mask *) + let fixup_wide_merkle_path t path address = + let rec build_fixed_path path address accum = + if List.is_empty path then List.rev accum + else + (* first element in the path contains hash at sibling of address *) + let curr_element = List.hd_exn path in + let merkle_node_address = Addr.sibling address in + let self_mask_hash = self_find_hash t address in + let sibling_mask_hash = self_find_hash t merkle_node_address in + let new_element = + match curr_element with + | `Left (h_l, h_r) -> + `Left + ( Option.value self_mask_hash ~default:h_l + , Option.value sibling_mask_hash ~default:h_r ) + | `Right (h_l, h_r) -> + `Right + ( Option.value sibling_mask_hash ~default:h_l + , Option.value self_mask_hash ~default:h_r ) + in + build_fixed_path (List.tl_exn path) (Addr.parent_exn address) + (new_element :: accum) + in + build_fixed_path path address [] + (* the following merkle_path_* functions report the Merkle path for the mask *) @@ -359,6 +428,45 @@ module Make (Inputs : Inputs_intf.S) = struct in recombine self_merkle_paths_rev parent_merkle_paths_rev [] + let wide_merkle_path_batch t locations = + assert_is_attached t ; + let self_merkle_paths_rev = + List.rev_map locations ~f:(fun location -> + let address = Location.to_path_exn location in + match self_wide_merkle_path t address with + | Some path -> + Either.First path + | None -> + Either.Second (location, 
address) ) + in + let parent_merkle_paths_rev = + let parent_locations_rev = + List.filter_map self_merkle_paths_rev ~f:(function + | Either.First _ -> + None + | Either.Second (location, _) -> + Some location ) + in + if List.is_empty parent_locations_rev then [] + else Base.wide_merkle_path_batch (get_parent t) parent_locations_rev + in + let rec recombine self_merkle_paths_rev parent_merkle_paths_rev acc = + match (self_merkle_paths_rev, parent_merkle_paths_rev) with + | [], [] -> + acc + | Either.First path :: self_merkle_paths_rev, parent_merkle_paths_rev -> + recombine self_merkle_paths_rev parent_merkle_paths_rev (path :: acc) + | ( Either.Second (_, address) :: self_merkle_paths_rev + , path :: parent_merkle_paths_rev ) -> + let path = fixup_wide_merkle_path t path address in + recombine self_merkle_paths_rev parent_merkle_paths_rev (path :: acc) + | _ :: _, [] -> + assert false + | [], _ :: _ -> + assert false + in + recombine self_merkle_paths_rev parent_merkle_paths_rev [] + (* given a Merkle path corresponding to a starting address, calculate addresses and hashes for each node affected by the starting hash; that is, along the path from the account address to root *) diff --git a/src/lib/mina_base/sparse_ledger_base.ml b/src/lib/mina_base/sparse_ledger_base.ml index 649cff6309c7..d8fe3128431b 100644 --- a/src/lib/mina_base/sparse_ledger_base.ml +++ b/src/lib/mina_base/sparse_ledger_base.ml @@ -153,6 +153,7 @@ M. , set_exn , find_index_exn , add_path + , add_wide_path_unsafe , merkle_root , iteri )] diff --git a/src/lib/mina_base/sparse_ledger_base.mli b/src/lib/mina_base/sparse_ledger_base.mli index 3c30e910a124..6bea8d7a0345 100644 --- a/src/lib/mina_base/sparse_ledger_base.mli +++ b/src/lib/mina_base/sparse_ledger_base.mli @@ -59,6 +59,17 @@ val add_path : -> Account.t -> t +(** Same as [add_path], but using the hashes provided in the wide merkle path + instead of recomputing them. + This is unsafe: the hashes are not checked or recomputed. 
+*) +val add_wide_path_unsafe : + t + -> [ `Left of Field.t * Field.t | `Right of Field.t * Field.t ] list + -> Account_id.t + -> Account.t + -> t + val iteri : t -> f:(Account.Index.t -> Account.t -> unit) -> unit val handler : t -> Handler.t Staged.t diff --git a/src/lib/sparse_ledger_lib/sparse_ledger.ml b/src/lib/sparse_ledger_lib/sparse_ledger.ml index fde43966ce3f..701f0219f1ed 100644 --- a/src/lib/sparse_ledger_lib/sparse_ledger.ml +++ b/src/lib/sparse_ledger_lib/sparse_ledger.ml @@ -74,6 +74,10 @@ module type S = sig val add_path : t -> [ `Left of hash | `Right of hash ] list -> account_id -> account -> t + (** Same as [add_path], but using the hashes provided in the wide merkle path + instead of recomputing them. + This is unsafe: the hashes are not checked or recomputed. + *) val add_wide_path_unsafe : t -> [ `Left of hash * hash | `Right of hash * hash ] list From 91133a2b61bd8ae8b60dfac7eccdf8bc7d2b5ef7 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 21 Nov 2023 17:33:10 +0000 Subject: [PATCH 067/119] Use wide ledger paths for `Sparse_ledger.of_ledger_subset_exn` --- src/lib/mina_ledger/sparse_ledger.ml | 29 +++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 82193f76fe0f..c33c4350a8ba 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -24,7 +24,7 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = List.length locations - List.length non_empty_locations in let accounts = Ledger.get_batch oledger non_empty_locations in - let empty_merkle_paths, merkle_paths = + let wide_merkle_paths, empty_merkle_paths = let next_location_exn loc = Option.value_exn (Ledger.Location.next loc) in let empty_address = Ledger.Addr.of_directions @@ -41,29 +41,32 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = first_loc :: iterate_n ~f:next_location_exn first_loc (num_new_accounts - 1) in - let 
merkle_paths = - Ledger.merkle_path_batch oledger (empty_locations @ non_empty_locations) + let paths = + Ledger.wide_merkle_path_batch oledger + (empty_locations @ non_empty_locations) in - List.split_n merkle_paths num_new_accounts + List.split_n paths num_new_accounts in let sl, _, _, _ = List.fold locations - ~init:(of_ledger_root oledger, accounts, merkle_paths, empty_merkle_paths) - ~f:(fun (sl, accounts, merkle_paths, empty_merkle_paths) (key, location) -> + ~init: + (of_ledger_root oledger, accounts, wide_merkle_paths, empty_merkle_paths) + ~f:(fun (sl, accounts, paths, empty_paths) (key, location) -> match location with | Some _loc -> ( - match (accounts, merkle_paths) with - | (_, account) :: rest, merkle_path :: rest_merkle_paths -> + match (accounts, paths) with + | (_, account) :: rest, merkle_path :: rest_paths -> let sl = - add_path sl merkle_path key (Option.value_exn account) + add_wide_path_unsafe sl merkle_path key + (Option.value_exn account) in - (sl, rest, rest_merkle_paths, empty_merkle_paths) + (sl, rest, rest_paths, empty_paths) | _ -> failwith "unexpected number of non empty accounts" ) | None -> - let path = List.hd_exn empty_merkle_paths in - let sl = add_path sl path key Account.empty in - (sl, accounts, merkle_paths, List.tl_exn empty_merkle_paths) ) + let path = List.hd_exn empty_paths in + let sl = add_wide_path_unsafe sl path key Account.empty in + (sl, accounts, paths, List.tl_exn empty_paths) ) in Debug_assert.debug_assert (fun () -> [%test_eq: Ledger_hash.t] From 52003ed510abbef4b4285c61ddaaae965cb68fae Mon Sep 17 00:00:00 2001 From: georgeee Date: Mon, 27 Nov 2023 16:04:45 +0100 Subject: [PATCH 068/119] Refactor wide merkle path functions Includes a bugfix in Merkle_ledger.Database.wide_merkle_path_batch --- src/lib/merkle_ledger/database.ml | 150 ++++------ src/lib/merkle_mask/masking_merkle_tree.ml | 321 ++++++++------------- src/lib/mina_ledger/sparse_ledger.ml | 42 ++- src/lib/sparse_ledger_lib/dune | 2 + 
src/lib/sparse_ledger_lib/sparse_ledger.ml | 59 ++-- 5 files changed, 230 insertions(+), 344 deletions(-) diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 2ae871960425..8a29790e4aa1 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -700,118 +700,88 @@ module Make (Inputs : Inputs_intf) : List.map2_exn dependency_dirs dependency_hashes ~f:(fun dir hash -> Direction.map dir ~left:(`Left hash) ~right:(`Right hash) ) - let merkle_path_batch mdb locations = + let path_batch_impl ~update_locs ~extract_hashes_exn mdb locations = let locations = - List.map locations ~f:(fun location -> - if Location.is_account location then - Location.Hash (Location.to_path_exn location) - else ( - assert (Location.is_hash location) ; - location ) ) + List.map locations ~f:(fun loc -> + let loc' = + if Location.is_account loc then + Location.Hash (Location.to_path_exn loc) + else ( + assert (Location.is_hash loc) ; + loc ) + in + (loc', mdb.depth - Location.height ~ledger_depth:mdb.depth loc', 0) ) in - let rev_locations, rev_directions, rev_lengths = + let rev_location_query, rev_directions, rev_lengths = + (* This loop is equivalent to: + 1. collecting location path for every location from `locations` + 2. updating the query with `update_locs` for every entry in every path from step 1. + 3. remembering length structure to be able to recover correspondence of hash lookups + to `locations` + 4. remembering directions corresponding to entries of location paths from step 1. 
+ *) let rec loop locations loc_acc dir_acc length_acc = match (locations, length_acc) with - | [], _ :: length_acc -> + | [], length_acc -> (loc_acc, dir_acc, length_acc) - | k :: locations, length :: length_acc -> - if Location.height ~ledger_depth:mdb.depth k >= mdb.depth then - loop locations loc_acc dir_acc (0 :: length :: length_acc) - else - let sibling = Location.sibling k in - let sibling_dir = - Location.last_direction (Location.to_path_exn k) - in - loop - (Location.parent k :: locations) - (sibling :: loc_acc) (sibling_dir :: dir_acc) - ((length + 1) :: length_acc) - | _ -> - assert false + | (_, 0, length) :: locations, length_acc -> + (* We found a root, all locations for it were added, + just the length needs to be added to length accumulator *) + loop locations loc_acc dir_acc (length :: length_acc) + | (k, depth, length) :: locations, length_acc -> + let sibling_dir = + Location.last_direction (Location.to_path_exn k) + in + loop + ((Location.parent k, depth - 1, length + 1) :: locations) + (update_locs k loc_acc) (sibling_dir :: dir_acc) length_acc in - loop locations [] [] [ 0 ] + loop locations [] [] [] in - let rev_hashes = get_hash_batch_exn mdb rev_locations in + (* Batch-request hashes to answer the query `rev_locations` *) + let rev_hashes = get_hash_batch_exn mdb rev_location_query in + (* Reconstruct merkle paths from response, query, lengths structure and directions *) let rec loop directions hashes lengths acc = - match (directions, hashes, lengths, acc) with - | [], [], [], _ (* actually [] *) :: acc_tl -> + match (lengths, directions, hashes, acc) with + | [], [], [], [] :: acc_tl -> acc_tl - | _, _, 0 :: lengths, _ -> + | 0 :: lengths, _, _, _ -> loop directions hashes lengths ([] :: acc) - | ( direction :: directions - , hash :: hashes - , length :: lengths - , acc_hd :: acc_tl ) -> + | length :: lengths, direction :: directions, hashes, acc_hd :: acc_tl -> + let entry, rest_hashes = extract_hashes_exn ~direction hashes in let dir = 
- Direction.map direction ~left:(`Left hash) ~right:(`Right hash) + Direction.map direction ~left:(`Left entry) ~right:(`Right entry) in - loop directions hashes ((length - 1) :: lengths) + loop directions rest_hashes ((length - 1) :: lengths) ((dir :: acc_hd) :: acc_tl) | _ -> failwith "Mismatched lengths" in loop rev_directions rev_hashes rev_lengths [ [] ] - let wide_merkle_path_batch mdb locations = - let locations = - List.map locations ~f:(fun location -> - if Location.is_account location then - Location.Hash (Location.to_path_exn location) - else ( - assert (Location.is_hash location) ; - location ) ) - in - let rev_locations, rev_directions, rev_lengths = - let rec loop locations loc_acc dir_acc length_acc = - match (locations, length_acc) with - | [], _ :: length_acc -> - (loc_acc, dir_acc, length_acc) - | k :: locations, length :: length_acc -> - if Location.height ~ledger_depth:mdb.depth k >= mdb.depth then - loop locations loc_acc dir_acc (0 :: length :: length_acc) - else - let sibling = Location.sibling k in - let sibling_dir = - Location.last_direction (Location.to_path_exn k) - in - let loc_acc = - match sibling_dir with - | Direction.Left -> - sibling :: k :: loc_acc - | Direction.Right -> - k :: sibling :: loc_acc - in - loop - (Location.parent k :: locations) - loc_acc (sibling_dir :: dir_acc) - ((length + 1) :: length_acc) - | _ -> - assert false - in - loop locations [] [] [ 0 ] + let merkle_path_batch = + let update_locs = Fn.compose List.cons Location.sibling in + let extract_hashes_exn ~direction:_ hs = (List.hd_exn hs, List.tl_exn hs) in + path_batch_impl ~update_locs ~extract_hashes_exn + + let wide_merkle_path_batch = + let update_locs k = + Fn.compose (List.cons (Location.sibling k)) (List.cons k) in - let rev_hashes = get_hash_batch_exn mdb rev_locations in - let rec loop directions hashes lengths acc = - match (directions, hashes, lengths, acc) with - | [], [], [], _ (* actually [] *) :: acc_tl -> - acc_tl - | _, _, 0 :: lengths, _ -> 
- loop directions hashes lengths ([] :: acc) - | ( direction :: directions - , hash_l :: hash_r :: hashes - , length :: lengths - , acc_hd :: acc_tl ) -> - let dir = - Direction.map direction - ~left:(`Left (hash_l, hash_r)) - ~right:(`Right (hash_l, hash_r)) + let extract_hashes_exn ~direction = function + | sibling :: self :: rest -> + let el = + match direction with + | Direction.Left -> + (self, sibling) + | Right -> + (sibling, self) in - loop directions hashes ((length - 1) :: lengths) - ((dir :: acc_hd) :: acc_tl) + (el, rest) | _ -> - failwith "Mismatched lengths" + failwith "wide_merkle_path_batch: mismatched lengths" in - loop rev_directions rev_hashes rev_lengths [ [] ] + path_batch_impl ~update_locs ~extract_hashes_exn let merkle_path_at_addr_exn t addr = merkle_path t (Location.Hash addr) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 5c0845d43a6b..bee2dcb8257a 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -220,13 +220,11 @@ module Make (Inputs : Inputs_intf.S) = struct let res = if Option.is_none res then let is_empty = - match t.current_location with - | None -> - true - | Some current_location -> + Option.value_map ~default:true t.current_location + ~f:(fun current_location -> let address = Location.to_path_exn id in let current_address = Location.to_path_exn current_location in - Addr.is_further_right ~than:current_address address + Addr.is_further_right ~than:current_address address ) in Option.some_if is_empty None else Some res @@ -238,150 +236,112 @@ module Make (Inputs : Inputs_intf.S) = struct let empty_hash = Empty_hashes.extensible_cache (module Hash) ~init_hash:Hash.empty_account - let self_merkle_path t address = - Option.try_with (fun () -> - let rec self_merkle_path address = - let height = Addr.height ~ledger_depth:t.depth address in - if height >= t.depth then [] - else - let sibling = Addr.sibling address in - let 
sibling_dir = Location.last_direction address in - let hash = - match self_find_hash t sibling with - | Some hash -> - hash - | None -> - let is_empty = - match t.current_location with - | None -> - true - | Some current_location -> - let current_address = - Location.to_path_exn current_location - in - Addr.is_further_right ~than:current_address sibling - in - if is_empty then empty_hash height - else (* Caught by [try_with] above. *) assert false - in - let parent_address = - match Addr.parent address with - | Ok addr -> - addr - | Error _ -> - (* Caught by [try_with] above. *) assert false - in - Direction.map sibling_dir ~left:(`Left hash) ~right:(`Right hash) - :: self_merkle_path parent_address - in - self_merkle_path address ) - - let self_wide_merkle_path t address = - Option.try_with (fun () -> - let rec self_wide_merkle_path address = - let height = Addr.height ~ledger_depth:t.depth address in - if height >= t.depth then [] - else - let sibling = Addr.sibling address in - let sibling_dir = Location.last_direction sibling in - let get_hash addr = - match self_find_hash t addr with - | Some hash -> - hash - | None -> - let is_empty = - match t.current_location with - | None -> - true - | Some current_location -> - let current_address = - Location.to_path_exn current_location - in - Addr.is_further_right ~than:current_address addr - in - if is_empty then empty_hash height - else (* Caught by [try_with] above. *) assert false - in - let sibling_hash = get_hash sibling in - let self_hash = get_hash address in - let parent_address = - match Addr.parent address with - | Ok addr -> - addr - | Error _ -> - (* Caught by [try_with] above. 
*) assert false - in - Direction.map sibling_dir - ~left:(`Right (sibling_hash, self_hash)) - ~right:(`Left (self_hash, sibling_hash)) - :: self_wide_merkle_path parent_address + let self_path_get_hash ~hashes ~current_location height address = + match Hashtbl.find hashes address with + | Some hash -> + Some hash + | None -> + let is_empty = + match current_location with + | None -> + true + | Some current_location -> + let current_address = Location.to_path_exn current_location in + Addr.is_further_right ~than:current_address address in - self_wide_merkle_path address ) + if is_empty then Some (empty_hash height) else None + + let rec self_path_impl ~element ~depth address = + let height = Addr.height ~ledger_depth:depth address in + if height >= depth then Some [] + else + let%bind.Option el = element height address in + let%bind.Option parent_address = Addr.parent address |> Or_error.ok in + let%map.Option rest = self_path_impl ~element ~depth parent_address in + el :: rest + + let self_merkle_path ~hashes ~current_location = + let element height address = + let sibling = Addr.sibling address in + let sibling_dir = Location.last_direction address in + let%map.Option hash = + self_path_get_hash ~hashes ~current_location height sibling + in + Direction.map sibling_dir ~left:(`Left hash) ~right:(`Right hash) + in + self_path_impl ~element + + let self_wide_merkle_path ~hashes ~current_location = + let element height address = + let sibling = Addr.sibling address in + let sibling_dir = Location.last_direction address in + let%bind.Option sibling_hash = + self_path_get_hash ~hashes ~current_location height sibling + in + let%map.Option self_hash = + self_path_get_hash ~hashes ~current_location height address + in + Direction.map sibling_dir + ~left:(`Left (self_hash, sibling_hash)) + ~right:(`Right (sibling_hash, self_hash)) + in + self_path_impl ~element (* fixup_merkle_path patches a Merkle path reported by the parent, overriding with hashes which are stored in the 
mask *) - let fixup_merkle_path t path address = - let rec build_fixed_path path address accum = - if List.is_empty path then List.rev accum - else - (* first element in the path contains hash at sibling of address *) - let curr_element = List.hd_exn path in - let merkle_node_address = Addr.sibling address in - let mask_hash = self_find_hash t merkle_node_address in - let parent_hash = match curr_element with `Left h | `Right h -> h in - let new_hash = Option.value mask_hash ~default:parent_hash in - let new_element = - match curr_element with - | `Left _ -> - `Left new_hash - | `Right _ -> - `Right new_hash - in - build_fixed_path (List.tl_exn path) (Addr.parent_exn address) - (new_element :: accum) + let fixup_merkle_path ~hashes ~address:init = + let f address = + (* first element in the path contains hash at sibling of address *) + let sibling_mask_hash = Hashtbl.find hashes (Addr.sibling address) in + let parent_addr = Addr.parent_exn address in + let open Option in + function + | `Left h -> + (parent_addr, `Left (value sibling_mask_hash ~default:h)) + | `Right h -> + (parent_addr, `Right (value sibling_mask_hash ~default:h)) in - build_fixed_path path address [] + Fn.compose snd @@ List.fold_map ~init ~f (* fixup_merkle_path patches a Merkle path reported by the parent, overriding with hashes which are stored in the mask *) - let fixup_wide_merkle_path t path address = - let rec build_fixed_path path address accum = - if List.is_empty path then List.rev accum - else - (* first element in the path contains hash at sibling of address *) - let curr_element = List.hd_exn path in - let merkle_node_address = Addr.sibling address in - let self_mask_hash = self_find_hash t address in - let sibling_mask_hash = self_find_hash t merkle_node_address in - let new_element = - match curr_element with - | `Left (h_l, h_r) -> - `Left - ( Option.value self_mask_hash ~default:h_l - , Option.value sibling_mask_hash ~default:h_r ) - | `Right (h_l, h_r) -> - `Right - ( 
Option.value sibling_mask_hash ~default:h_l - , Option.value self_mask_hash ~default:h_r ) - in - build_fixed_path (List.tl_exn path) (Addr.parent_exn address) - (new_element :: accum) + let fixup_wide_merkle_path ~hashes ~address:init = + let f address = + (* element in the path contains hash at sibling of address *) + let sibling_mask_hash = Hashtbl.find hashes (Addr.sibling address) in + let self_mask_hash = Hashtbl.find hashes address in + let parent_addr = Addr.parent_exn address in + let open Option in + function + | `Left (h_l, h_r) -> + ( parent_addr + , `Left + ( value self_mask_hash ~default:h_l + , value sibling_mask_hash ~default:h_r ) ) + | `Right (h_l, h_r) -> + ( parent_addr + , `Right + ( value sibling_mask_hash ~default:h_l + , value self_mask_hash ~default:h_r ) ) in - build_fixed_path path address [] + Fn.compose snd @@ List.fold_map ~init ~f (* the following merkle_path_* functions report the Merkle path for the mask *) let merkle_path_at_addr_exn t address = assert_is_attached t ; - match self_merkle_path t address with + match + self_merkle_path ~depth:t.depth ~hashes:t.hash_tbl + ~current_location:t.current_location address + with | Some path -> path | None -> let parent_merkle_path = Base.merkle_path_at_addr_exn (get_parent t) address in - fixup_merkle_path t parent_merkle_path address + fixup_merkle_path ~hashes:t.hash_tbl parent_merkle_path ~address let merkle_path_at_index_exn t index = merkle_path_at_addr_exn t (Addr.of_int_exn ~ledger_depth:t.depth index) @@ -389,83 +349,46 @@ module Make (Inputs : Inputs_intf.S) = struct let merkle_path t location = merkle_path_at_addr_exn t (Location.to_path_exn location) - let merkle_path_batch t locations = + let path_batch_impl ~fixup_path ~self_lookup ~base_lookup t locations = assert_is_attached t ; - let self_merkle_paths_rev = - List.rev_map locations ~f:(fun location -> + let parent = get_parent t in + let self_paths = + List.map locations ~f:(fun location -> let address = Location.to_path_exn 
location in - match self_merkle_path t address with - | Some path -> - Either.First path - | None -> - Either.Second (location, address) ) + self_lookup ~hashes:t.hash_tbl ~current_location:t.current_location + ~depth:t.depth address + |> Option.value_map + ~default:(Either.Second (location, address)) + ~f:Either.first ) in - let parent_merkle_paths_rev = - let parent_locations_rev = - List.filter_map self_merkle_paths_rev ~f:(function + let all_parent_paths = + let locs = + List.filter_map self_paths ~f:(function | Either.First _ -> None | Either.Second (location, _) -> Some location ) in - if List.is_empty parent_locations_rev then [] - else Base.merkle_path_batch (get_parent t) parent_locations_rev + if List.is_empty locs then [] else base_lookup parent locs in - let rec recombine self_merkle_paths_rev parent_merkle_paths_rev acc = - match (self_merkle_paths_rev, parent_merkle_paths_rev) with - | [], [] -> - acc - | Either.First path :: self_merkle_paths_rev, parent_merkle_paths_rev -> - recombine self_merkle_paths_rev parent_merkle_paths_rev (path :: acc) - | ( Either.Second (_, address) :: self_merkle_paths_rev - , path :: parent_merkle_paths_rev ) -> - let path = fixup_merkle_path t path address in - recombine self_merkle_paths_rev parent_merkle_paths_rev (path :: acc) - | _ :: _, [] -> - assert false - | [], _ :: _ -> - assert false + let f parent_paths = function + | Either.First path -> + (parent_paths, path) + | Either.Second (_, address) -> + let path = + fixup_path ~hashes:t.hash_tbl ~address (List.hd_exn parent_paths) + in + (List.tl_exn parent_paths, path) in - recombine self_merkle_paths_rev parent_merkle_paths_rev [] + snd @@ List.fold_map ~init:all_parent_paths ~f self_paths - let wide_merkle_path_batch t locations = - assert_is_attached t ; - let self_merkle_paths_rev = - List.rev_map locations ~f:(fun location -> - let address = Location.to_path_exn location in - match self_wide_merkle_path t address with - | Some path -> - Either.First path - | 
None -> - Either.Second (location, address) ) - in - let parent_merkle_paths_rev = - let parent_locations_rev = - List.filter_map self_merkle_paths_rev ~f:(function - | Either.First _ -> - None - | Either.Second (location, _) -> - Some location ) - in - if List.is_empty parent_locations_rev then [] - else Base.wide_merkle_path_batch (get_parent t) parent_locations_rev - in - let rec recombine self_merkle_paths_rev parent_merkle_paths_rev acc = - match (self_merkle_paths_rev, parent_merkle_paths_rev) with - | [], [] -> - acc - | Either.First path :: self_merkle_paths_rev, parent_merkle_paths_rev -> - recombine self_merkle_paths_rev parent_merkle_paths_rev (path :: acc) - | ( Either.Second (_, address) :: self_merkle_paths_rev - , path :: parent_merkle_paths_rev ) -> - let path = fixup_wide_merkle_path t path address in - recombine self_merkle_paths_rev parent_merkle_paths_rev (path :: acc) - | _ :: _, [] -> - assert false - | [], _ :: _ -> - assert false - in - recombine self_merkle_paths_rev parent_merkle_paths_rev [] + let merkle_path_batch = + path_batch_impl ~base_lookup:Base.merkle_path_batch + ~self_lookup:self_merkle_path ~fixup_path:fixup_merkle_path + + let wide_merkle_path_batch = + path_batch_impl ~base_lookup:Base.wide_merkle_path_batch + ~self_lookup:self_wide_merkle_path ~fixup_path:fixup_wide_merkle_path (* given a Merkle path corresponding to a starting address, calculate addresses and hashes for each node affected by the starting hash; that is, @@ -789,13 +712,9 @@ module Make (Inputs : Inputs_intf.S) = struct *) let unsafe_preload_accounts_from_parent t account_ids = assert_is_attached t ; - let locations = - Base.location_of_account_batch (get_parent t) account_ids - in - let non_empty_locations = - List.filter_map locations ~f:(fun (_account_id, location) -> location) - in - let accounts = Base.get_batch (get_parent t) non_empty_locations in + let locations = location_of_account_batch t account_ids in + let non_empty_locations = List.filter_map 
locations ~f:snd in + let accounts = get_batch t non_empty_locations in let all_hash_locations = let rec generate_locations account_locations acc = match account_locations with diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index c33c4350a8ba..84026e53d654 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -17,14 +17,14 @@ let iterate_n ~f = in impl -let of_ledger_subset_exn (oledger : Ledger.t) keys = +let of_ledger_subset_exn_impl ~path_query ~path_add (oledger : Ledger.t) keys = let locations = Ledger.location_of_account_batch oledger keys in let non_empty_locations = List.filter_map locations ~f:snd in let num_new_accounts = List.length locations - List.length non_empty_locations in let accounts = Ledger.get_batch oledger non_empty_locations in - let wide_merkle_paths, empty_merkle_paths = + let non_empty_paths, empty_paths = let next_location_exn loc = Option.value_exn (Ledger.Location.next loc) in let empty_address = Ledger.Addr.of_directions @@ -41,32 +41,22 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = first_loc :: iterate_n ~f:next_location_exn first_loc (num_new_accounts - 1) in - let paths = - Ledger.wide_merkle_path_batch oledger - (empty_locations @ non_empty_locations) - in + let paths = path_query oledger (empty_locations @ non_empty_locations) in List.split_n paths num_new_accounts in + let process_location sl key = function + | Some _, (_, Some account) :: accs, path :: ne_paths, epaths -> + (path_add sl path key account, accs, ne_paths, epaths) + | None, accs, ne_paths, path :: epaths -> + (path_add sl path key Account.empty, accs, ne_paths, epaths) + | _ -> + failwith "of_ledger_subset_exn: unexpected case" + in let sl, _, _, _ = List.fold locations - ~init: - (of_ledger_root oledger, accounts, wide_merkle_paths, empty_merkle_paths) - ~f:(fun (sl, accounts, paths, empty_paths) (key, location) -> - match location with - | Some _loc -> ( - match 
(accounts, paths) with - | (_, account) :: rest, merkle_path :: rest_paths -> - let sl = - add_wide_path_unsafe sl merkle_path key - (Option.value_exn account) - in - (sl, rest, rest_paths, empty_paths) - | _ -> - failwith "unexpected number of non empty accounts" ) - | None -> - let path = List.hd_exn empty_paths in - let sl = add_wide_path_unsafe sl path key Account.empty in - (sl, accounts, paths, List.tl_exn empty_paths) ) + ~init:(of_ledger_root oledger, accounts, non_empty_paths, empty_paths) + ~f:(fun (sl, accs, ne_paths, epaths) (key, mloc) -> + process_location sl key (mloc, accs, ne_paths, epaths) ) in Debug_assert.debug_assert (fun () -> [%test_eq: Ledger_hash.t] @@ -74,6 +64,10 @@ let of_ledger_subset_exn (oledger : Ledger.t) keys = ((merkle_root sl :> Random_oracle.Digest.t) |> Ledger_hash.of_hash) ) ; sl +let of_ledger_subset_exn = + of_ledger_subset_exn_impl ~path_query:Ledger.wide_merkle_path_batch + ~path_add:add_wide_path_unsafe + let of_ledger_index_subset_exn (ledger : Ledger.Any_ledger.witness) indexes = List.fold indexes ~init: diff --git a/src/lib/sparse_ledger_lib/dune b/src/lib/sparse_ledger_lib/dune index d4ceea278599..8eb2fd1f42a9 100644 --- a/src/lib/sparse_ledger_lib/dune +++ b/src/lib/sparse_ledger_lib/dune @@ -13,6 +13,8 @@ bin_prot.shape result ppx_version.runtime + ;; mina + mina_stdlib ) (preprocess (pps ppx_jane ppx_compare ppx_deriving_yojson ppx_version)) diff --git a/src/lib/sparse_ledger_lib/sparse_ledger.ml b/src/lib/sparse_ledger_lib/sparse_ledger.ml index 701f0219f1ed..5c2271e70ab4 100644 --- a/src/lib/sparse_ledger_lib/sparse_ledger.ml +++ b/src/lib/sparse_ledger_lib/sparse_ledger.ml @@ -185,38 +185,39 @@ end = struct } let add_wide_path_unsafe depth0 tree0 path0 account = - let rec build_tree hash height p = - match p with - | `Left (h_l, h_r) :: path -> - let l = build_tree h_l (height - 1) path in - Tree.Node (hash, l, Hash h_r) - | `Right (h_l, h_r) :: path -> - let r = build_tree h_r (height - 1) path in - Node (hash, 
Hash h_l, r) - | [] -> - assert (height = -1) ; - Account account + let f (prev_l, prev_r) = function + | `Left (h_l, h_r) -> + (Tree.Node (h_l, prev_l, prev_r), Tree.Hash h_r) + | `Right (h_l, h_r) -> + (Hash h_l, Node (h_r, prev_l, prev_r)) in let rec union height tree path = match (tree, path) with - | Tree.Hash h, path -> - let t = build_tree h height path in - [%test_result: Hash.t] - ~message: - "Hashes in union are not equal, something is wrong with your \ - ledger" - ~expect:h (hash t) ; - t - | Node (h, l, r), `Left (_h_l, _h_r) :: path -> - let l = union (height - 1) l path in - Node (h, l, r) - | Node (h, l, r), `Right (_h_l, _h_r) :: path -> - let r = union (height - 1) r path in - Node (h, l, r) - | Node _, [] -> - failwith "Path too short" - | Account _, _ :: _ -> - failwith "Path too long" + | Tree.Hash _, [] -> + assert (height = -1) ; + Tree.Account account + | Tree.Hash h, fst_el :: rest when List.length rest = height -> + (* Split `path` into last element and list of rest of elements in reversed order *) + let last_el, init_path_rev = + Mina_stdlib.Nonempty_list.(init fst_el rest |> rev |> uncons) + in + let init = + match last_el with + | `Left (_, h_r) -> + (Tree.Account account, Tree.Hash h_r) + | `Right (h_l, _) -> + (Hash h_l, Account account) + in + let l, r = List.fold ~init init_path_rev ~f in + Tree.Node (h, l, r) + | Node (h, l, r), `Left _ :: path -> + let l' = union (height - 1) l path in + Node (h, l', r) + | Node (h, l, r), `Right _ :: path -> + let r' = union (height - 1) r path in + Node (h, l, r') + | Tree.Hash _, _ | Account _, _ :: _ | Node _, [] -> + failwith "Path length doesn't match depth/tree" | Account _, [] -> tree in From eaa054cbfdee60b1c48497b47a67e1488786b0df Mon Sep 17 00:00:00 2001 From: georgeee Date: Mon, 27 Nov 2023 16:23:11 +0100 Subject: [PATCH 069/119] Small rewrite of path_batch_impl --- src/lib/merkle_ledger/database.ml | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) 
diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 8a29790e4aa1..3ad6e007cb27 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -741,23 +741,21 @@ module Make (Inputs : Inputs_intf) : (* Batch-request hashes to answer the query `rev_locations` *) let rev_hashes = get_hash_batch_exn mdb rev_location_query in (* Reconstruct merkle paths from response, query, lengths structure and directions *) - let rec loop directions hashes lengths acc = - match (lengths, directions, hashes, acc) with - | [], [], [], [] :: acc_tl -> - acc_tl - | 0 :: lengths, _, _, _ -> - loop directions hashes lengths ([] :: acc) - | length :: lengths, direction :: directions, hashes, acc_hd :: acc_tl -> - let entry, rest_hashes = extract_hashes_exn ~direction hashes in - let dir = - Direction.map direction ~left:(`Left entry) ~right:(`Right entry) - in - loop directions rest_hashes ((length - 1) :: lengths) - ((dir :: acc_hd) :: acc_tl) - | _ -> - failwith "Mismatched lengths" + let f (directions, all_hashes, acc) length = + let dirs, rest_dirs = List.split_n directions length in + let rest_hashes, res = + List.fold_map dirs ~init:all_hashes ~f:(fun hashes direction -> + let entry, rest_hashes = extract_hashes_exn ~direction hashes in + let dir = + Direction.map direction ~left:(`Left entry) ~right:(`Right entry) + in + (rest_hashes, dir) ) + in + (rest_dirs, rest_hashes, res :: acc) in - loop rev_directions rev_hashes rev_lengths [ [] ] + (* essentially it's `List.rev_fold_map`, but there is no such operator sadly *) + Tuple3.get3 + @@ List.fold ~init:(rev_directions, rev_hashes, []) ~f rev_lengths let merkle_path_batch = let update_locs = Fn.compose List.cons Location.sibling in From 8e47aaff771bd812928bac42816de22f483b5b3c Mon Sep 17 00:00:00 2001 From: georgeee Date: Tue, 28 Nov 2023 21:26:20 +0100 Subject: [PATCH 070/119] Nit: rename variables --- src/lib/merkle_ledger/null_ledger.ml | 10 ++++------ 
src/lib/merkle_mask/masking_merkle_tree.ml | 10 +++++----- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/lib/merkle_ledger/null_ledger.ml b/src/lib/merkle_ledger/null_ledger.ml index ac9dd31e6b43..644dbfc9a5cb 100644 --- a/src/lib/merkle_ledger/null_ledger.ml +++ b/src/lib/merkle_ledger/null_ledger.ml @@ -57,9 +57,9 @@ end = struct let h = Location.height ~ledger_depth:t.depth k in if h >= t.depth then [] else - let sibling_dir = Location.last_direction (Location.to_path_exn k) in + let dir = Location.last_direction (Location.to_path_exn k) in let hash = empty_hash_at_height h in - Direction.map sibling_dir ~left:(`Left hash) ~right:(`Right hash) + Direction.map dir ~left:(`Left hash) ~right:(`Right hash) :: loop (Location.parent k) in loop location @@ -77,11 +77,9 @@ end = struct let h = Location.height ~ledger_depth:t.depth k in if h >= t.depth then [] else - let sibling_dir = Location.last_direction (Location.to_path_exn k) in + let dir = Location.last_direction (Location.to_path_exn k) in let hash = empty_hash_at_height h in - Direction.map sibling_dir - ~left:(`Left (hash, hash)) - ~right:(`Right (hash, hash)) + Direction.map dir ~left:(`Left (hash, hash)) ~right:(`Right (hash, hash)) :: loop (Location.parent k) in loop location diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index bee2dcb8257a..b8f7667951e9 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -263,25 +263,25 @@ module Make (Inputs : Inputs_intf.S) = struct let self_merkle_path ~hashes ~current_location = let element height address = let sibling = Addr.sibling address in - let sibling_dir = Location.last_direction address in - let%map.Option hash = + let dir = Location.last_direction address in + let%map.Option sibling_hash = self_path_get_hash ~hashes ~current_location height sibling in - Direction.map sibling_dir ~left:(`Left hash) ~right:(`Right hash) + 
Direction.map dir ~left:(`Left sibling_hash) ~right:(`Right sibling_hash) in self_path_impl ~element let self_wide_merkle_path ~hashes ~current_location = let element height address = let sibling = Addr.sibling address in - let sibling_dir = Location.last_direction address in + let dir = Location.last_direction address in let%bind.Option sibling_hash = self_path_get_hash ~hashes ~current_location height sibling in let%map.Option self_hash = self_path_get_hash ~hashes ~current_location height address in - Direction.map sibling_dir + Direction.map dir ~left:(`Left (self_hash, sibling_hash)) ~right:(`Right (sibling_hash, self_hash)) in From d77b5e58ff39bb9be85cb42a79722f3114b525d0 Mon Sep 17 00:00:00 2001 From: Tang Jiawei Date: Wed, 29 Nov 2023 18:51:08 +0800 Subject: [PATCH 071/119] Refactor wide_merkle_path_batch --- src/lib/merkle_ledger/database.ml | 111 +++++++++++------------------- 1 file changed, 39 insertions(+), 72 deletions(-) diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 3ad6e007cb27..07ede761e506 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -700,86 +700,53 @@ module Make (Inputs : Inputs_intf) : List.map2_exn dependency_dirs dependency_hashes ~f:(fun dir hash -> Direction.map dir ~left:(`Left hash) ~right:(`Right hash) ) - let path_batch_impl ~update_locs ~extract_hashes_exn mdb locations = + let path_batch_impl ~expand_query ~compute_path mdb locations = let locations = - List.map locations ~f:(fun loc -> - let loc' = - if Location.is_account loc then - Location.Hash (Location.to_path_exn loc) - else ( - assert (Location.is_hash loc) ; - loc ) - in - (loc', mdb.depth - Location.height ~ledger_depth:mdb.depth loc', 0) ) + List.map locations ~f:(fun location -> + if Location.is_account location then + Location.Hash (Location.to_path_exn location) + else ( + assert (Location.is_hash location) ; + location ) ) in - let rev_location_query, rev_directions, rev_lengths = - 
(* This loop is equivalent to: - 1. collecting location path for every location from `locations` - 2. updating the query with `update_locs` for every entry in every path from step 1. - 3. remembering length structure to be able to recover correspondence of hash lookups - to `locations` - 4. remembering directions corresponding to entries of location paths from step 1. - *) - let rec loop locations loc_acc dir_acc length_acc = - match (locations, length_acc) with - | [], length_acc -> - (loc_acc, dir_acc, length_acc) - | (_, 0, length) :: locations, length_acc -> - (* We found a root, all locations for it were added, - just the length needs to be added to length accumulator *) - loop locations loc_acc dir_acc (length :: length_acc) - | (k, depth, length) :: locations, length_acc -> - let sibling_dir = - Location.last_direction (Location.to_path_exn k) - in - loop - ((Location.parent k, depth - 1, length + 1) :: locations) - (update_locs k loc_acc) (sibling_dir :: dir_acc) length_acc - in - loop locations [] [] [] + let list_of_dependencies = + List.map locations ~f:Location.merkle_path_dependencies_exn in - (* Batch-request hashes to answer the query `rev_locations` *) - let rev_hashes = get_hash_batch_exn mdb rev_location_query in - (* Reconstruct merkle paths from response, query, lengths structure and directions *) - let f (directions, all_hashes, acc) length = - let dirs, rest_dirs = List.split_n directions length in - let rest_hashes, res = - List.fold_map dirs ~init:all_hashes ~f:(fun hashes direction -> - let entry, rest_hashes = extract_hashes_exn ~direction hashes in - let dir = - Direction.map direction ~left:(`Left entry) ~right:(`Right entry) - in - (rest_hashes, dir) ) - in - (rest_dirs, rest_hashes, res :: acc) + let all_locs = + List.concat list_of_dependencies |> List.map ~f:fst |> expand_query in - (* essentially it's `List.rev_fold_map`, but there is no such operator sadly *) - Tuple3.get3 - @@ List.fold ~init:(rev_directions, rev_hashes, []) ~f 
rev_lengths + let hashes = get_hash_batch_exn mdb all_locs in + snd @@ List.fold_map ~init:hashes ~f:compute_path list_of_dependencies let merkle_path_batch = - let update_locs = Fn.compose List.cons Location.sibling in - let extract_hashes_exn ~direction:_ hs = (List.hd_exn hs, List.tl_exn hs) in - path_batch_impl ~update_locs ~extract_hashes_exn + path_batch_impl ~expand_query:ident + ~compute_path:(fun all_hashes loc_and_dir_list -> + let len = List.length loc_and_dir_list in + let sibling_hashes, rest_hashes = List.split_n all_hashes len in + let res = + List.map2_exn loc_and_dir_list sibling_hashes + ~f:(fun (_, direction) sibling_hash -> + Direction.map direction ~left:(`Left sibling_hash) + ~right:(`Right sibling_hash) ) + in + (rest_hashes, res) ) let wide_merkle_path_batch = - let update_locs k = - Fn.compose (List.cons (Location.sibling k)) (List.cons k) - in - let extract_hashes_exn ~direction = function - | sibling :: self :: rest -> - let el = - match direction with - | Direction.Left -> - (self, sibling) - | Right -> - (sibling, self) - in - (el, rest) - | _ -> - failwith "wide_merkle_path_batch: mismatched lengths" - in - path_batch_impl ~update_locs ~extract_hashes_exn + path_batch_impl + ~expand_query:(fun sib_locs -> + sib_locs @ List.map sib_locs ~f:Location.sibling ) + ~compute_path:(fun all_hashes loc_and_dir_list -> + let len = List.length loc_and_dir_list in + let sibling_hashes, rest_hashes = List.split_n all_hashes len in + let self_hashes, rest_hashes' = List.split_n rest_hashes len in + let res = + List.map3_exn loc_and_dir_list sibling_hashes self_hashes + ~f:(fun (_, direction) sibling_hash self_hash -> + Direction.map direction + ~left:(`Left (self_hash, sibling_hash)) + ~right:(`Right (sibling_hash, self_hash)) ) + in + (rest_hashes', res) ) let merkle_path_at_addr_exn t addr = merkle_path t (Location.Hash addr) From 19c25009ca1f741c7542a6dfe1a8dc06a7cfe379 Mon Sep 17 00:00:00 2001 From: Tang Jiawei Date: Thu, 30 Nov 2023 00:25:08 
+0800 Subject: [PATCH 072/119] fix the order of the locations --- src/lib/merkle_ledger/database.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 07ede761e506..ec1e2c1e8568 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -713,7 +713,7 @@ module Make (Inputs : Inputs_intf) : List.map locations ~f:Location.merkle_path_dependencies_exn in let all_locs = - List.concat list_of_dependencies |> List.map ~f:fst |> expand_query + List.map list_of_dependencies ~f:(fun deps -> List.map ~f:fst deps |> expand_query) |> List.concat in let hashes = get_hash_batch_exn mdb all_locs in snd @@ List.fold_map ~init:hashes ~f:compute_path list_of_dependencies From 13d8d2c11beb073f66072578f61f329a45f0761e Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 29 Nov 2023 17:02:43 +0100 Subject: [PATCH 073/119] fixup! Use wide ledger paths for `Sparse_ledger.of_ledger_subset_exn` --- src/lib/mina_ledger/sparse_ledger.ml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/lib/mina_ledger/sparse_ledger.ml b/src/lib/mina_ledger/sparse_ledger.ml index 84026e53d654..cd9ec6709158 100644 --- a/src/lib/mina_ledger/sparse_ledger.ml +++ b/src/lib/mina_ledger/sparse_ledger.ml @@ -24,7 +24,7 @@ let of_ledger_subset_exn_impl ~path_query ~path_add (oledger : Ledger.t) keys = List.length locations - List.length non_empty_locations in let accounts = Ledger.get_batch oledger non_empty_locations in - let non_empty_paths, empty_paths = + let empty_paths, non_empty_paths = let next_location_exn loc = Option.value_exn (Ledger.Location.next loc) in let empty_address = Ledger.Addr.of_directions @@ -49,8 +49,12 @@ let of_ledger_subset_exn_impl ~path_query ~path_add (oledger : Ledger.t) keys = (path_add sl path key account, accs, ne_paths, epaths) | None, accs, ne_paths, path :: epaths -> (path_add sl path key Account.empty, accs, ne_paths, epaths) + | 
Some _, (_, None) :: _, _, _ -> + failwith + "of_ledger_subset_exn: account not found for location returned by \ + location_of_account_batch" | _ -> - failwith "of_ledger_subset_exn: unexpected case" + failwith "of_ledger_subset_exn: mismatched lengths" in let sl, _, _, _ = List.fold locations From 39fee936f702314af4b1e478900e641757734f08 Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 29 Nov 2023 18:38:54 +0100 Subject: [PATCH 074/119] Refactor add_wide_path_unsafe --- src/lib/sparse_ledger_lib/sparse_ledger.ml | 99 ++++++++++++++-------- 1 file changed, 62 insertions(+), 37 deletions(-) diff --git a/src/lib/sparse_ledger_lib/sparse_ledger.ml b/src/lib/sparse_ledger_lib/sparse_ledger.ml index 5c2271e70ab4..ca2da82e2612 100644 --- a/src/lib/sparse_ledger_lib/sparse_ledger.ml +++ b/src/lib/sparse_ledger_lib/sparse_ledger.ml @@ -184,44 +184,69 @@ end = struct ; indexes = (account_id, index) :: t.indexes } - let add_wide_path_unsafe depth0 tree0 path0 account = - let f (prev_l, prev_r) = function - | `Left (h_l, h_r) -> - (Tree.Node (h_l, prev_l, prev_r), Tree.Hash h_r) - | `Right (h_l, h_r) -> - (Hash h_l, Node (h_r, prev_l, prev_r)) + let add_wide_path_unsafe tree0 path0 account = + (* Traverses the tree along path, collecting node and untraversed sibling hashes + Stops when encounters `Hash` or `Account` node. + + Returns the last visited node (`Hash` or `Account`), remainder of path and + collected node/sibling hashes in bottom-to-top order. 
+ *) + let rec traverse_through_nodes acc = function + | Tree.Hash h, rest -> + (acc, `Hash h, rest) + | Account a, rest -> + (acc, `Account a, rest) + | Node (h, l, r), `Left _ :: rest -> + traverse_through_nodes ((h, `Right_preserved r) :: acc) (l, rest) + | Node (h, l, r), `Right _ :: rest -> + traverse_through_nodes ((h, `Left_preserved l) :: acc) (r, rest) + | Node _, [] -> + failwith "path is shorter than a tree's branch" in - let rec union height tree path = - match (tree, path) with - | Tree.Hash _, [] -> - assert (height = -1) ; - Tree.Account account - | Tree.Hash h, fst_el :: rest when List.length rest = height -> - (* Split `path` into last element and list of rest of elements in reversed order *) - let last_el, init_path_rev = - Mina_stdlib.Nonempty_list.(init fst_el rest |> rev |> uncons) - in - let init = - match last_el with - | `Left (_, h_r) -> - (Tree.Account account, Tree.Hash h_r) - | `Right (h_l, _) -> - (Hash h_l, Account account) - in - let l, r = List.fold ~init init_path_rev ~f in - Tree.Node (h, l, r) - | Node (h, l, r), `Left _ :: path -> - let l' = union (height - 1) l path in - Node (h, l', r) - | Node (h, l, r), `Right _ :: path -> - let r' = union (height - 1) r path in - Node (h, l, r') - | Tree.Hash _, _ | Account _, _ :: _ | Node _, [] -> - failwith "Path length doesn't match depth/tree" - | Account _, [] -> - tree + (* Takes a list of collected node/sibling hashes in bottom-to-top order + (returned by the function above) and returns a tree that is reconstructed from + bottom to top, substituting siblings as necessary. 
*) + let build_to_top bottom_node = + List.fold ~init:bottom_node ~f:(fun node -> function + | h, `Left_preserved l -> + Tree.Node (h, l, node) + | h, `Right_preserved r -> + Tree.Node (h, node, r) ) in - union (depth0 - 1) tree0 (List.rev path0) + (* Uses wide path to build the tail of path *) + let build_tail hash_node_to_bottom_path = + let bottom_el, bottom_to_hash_node_path = + Mina_stdlib.Nonempty_list.(rev hash_node_to_bottom_path |> uncons) + in + (* Left and right branches of a node that is parent of the bottom node *) + let init = + match bottom_el with + | `Left (_, h_r) -> + (Tree.Account account, Tree.Hash h_r) + | `Right (h_l, _) -> + (Hash h_l, Account account) + in + let f (prev_l, prev_r) = function + | `Left (h_l, h_r) -> + (Tree.Node (h_l, prev_l, prev_r), Tree.Hash h_r) + | `Right (h_l, h_r) -> + (Hash h_l, Node (h_r, prev_l, prev_r)) + in + List.fold ~init bottom_to_hash_node_path ~f + in + let hash_to_top_path, hash_node, rest = + traverse_through_nodes [] (tree0, List.rev path0) + in + match (hash_node, Mina_stdlib.Nonempty_list.of_list_opt rest) with + | `Account _, None -> + tree0 + | `Hash _, None -> + build_to_top (Tree.Account account) hash_to_top_path + | `Hash h, Some hash_to_bottom_path -> + let tail_l, tail_r = build_tail hash_to_bottom_path in + build_to_top (Tree.Node (h, tail_l, tail_r)) hash_to_top_path + | `Account _, Some _ -> + failwith "path is longer than a tree's branch" let add_wide_path_unsafe (t : t) path account_id account = let index = @@ -229,7 +254,7 @@ end = struct match x with `Right _ -> acc + (1 lsl i) | `Left _ -> acc ) in { t with - tree = add_wide_path_unsafe t.depth t.tree path account + tree = add_wide_path_unsafe t.tree path account ; indexes = (account_id, index) :: t.indexes } From c20e6b3890eb76ce530e79316e8e920fb2a68a0c Mon Sep 17 00:00:00 2001 From: georgeee Date: Fri, 1 Dec 2023 17:20:12 +0100 Subject: [PATCH 075/119] Generalize add_path in sparse ledger --- src/lib/sparse_ledger_lib/sparse_ledger.ml 
| 110 ++++++++------------- 1 file changed, 41 insertions(+), 69 deletions(-) diff --git a/src/lib/sparse_ledger_lib/sparse_ledger.ml b/src/lib/sparse_ledger_lib/sparse_ledger.ml index ca2da82e2612..68f9a313ac8b 100644 --- a/src/lib/sparse_ledger_lib/sparse_ledger.ml +++ b/src/lib/sparse_ledger_lib/sparse_ledger.ml @@ -133,59 +133,8 @@ end = struct let merkle_root { T.tree; _ } = hash tree - let add_path depth0 tree0 path0 account = - let rec build_tree height p = - match p with - | `Left h_r :: path -> - let l = build_tree (height - 1) path in - Tree.Node (Hash.merge ~height (hash l) h_r, l, Hash h_r) - | `Right h_l :: path -> - let r = build_tree (height - 1) path in - Node (Hash.merge ~height h_l (hash r), Hash h_l, r) - | [] -> - assert (height = -1) ; - Account account - in - let rec union height tree path = - match (tree, path) with - | Tree.Hash h, path -> - let t = build_tree height path in - [%test_result: Hash.t] - ~message: - "Hashes in union are not equal, something is wrong with your \ - ledger" - ~expect:h (hash t) ; - t - | Node (h, l, r), `Left h_r :: path -> - assert (Hash.equal h_r (hash r)) ; - let l = union (height - 1) l path in - Node (h, l, r) - | Node (h, l, r), `Right h_l :: path -> - assert (Hash.equal h_l (hash l)) ; - let r = union (height - 1) r path in - Node (h, l, r) - | Node _, [] -> - failwith "Path too short" - | Account _, _ :: _ -> - failwith "Path too long" - | Account a, [] -> - assert (Account.equal a account) ; - tree - in - union (depth0 - 1) tree0 (List.rev path0) - - let add_path (t : t) path account_id account = - let index = - List.foldi path ~init:0 ~f:(fun i acc x -> - match x with `Right _ -> acc + (1 lsl i) | `Left _ -> acc ) - in - { t with - tree = add_path t.depth t.tree path account - ; indexes = (account_id, index) :: t.indexes - } - - let add_wide_path_unsafe tree0 path0 account = - (* Traverses the tree along path, collecting node and untraversed sibling hashes + let add_path_impl ~replace_self tree0 path0 
account = + (* Traverses the tree along path, collecting nodes and untraversed sibling hashes Stops when encounters `Hash` or `Account` node. Returns the last visited node (`Hash` or `Account`), remainder of path and @@ -213,26 +162,27 @@ end = struct | h, `Right_preserved r -> Tree.Node (h, node, r) ) in - (* Uses wide path to build the tail of path *) + let build_tail_f height (prev_l, prev_r) = + replace_self ~f:(fun mself -> + let self = + match mself with + | Some self -> + self + | None -> + Hash.merge ~height (hash prev_l) (hash prev_r) + in + Tree.Node (self, prev_l, prev_r) ) + in + (* Builds the tail of path, i.e. part of the path that is not present in + the current ledger and we just add it all the way down to account + using the path *) let build_tail hash_node_to_bottom_path = let bottom_el, bottom_to_hash_node_path = Mina_stdlib.Nonempty_list.(rev hash_node_to_bottom_path |> uncons) in (* Left and right branches of a node that is parent of the bottom node *) - let init = - match bottom_el with - | `Left (_, h_r) -> - (Tree.Account account, Tree.Hash h_r) - | `Right (h_l, _) -> - (Hash h_l, Account account) - in - let f (prev_l, prev_r) = function - | `Left (h_l, h_r) -> - (Tree.Node (h_l, prev_l, prev_r), Tree.Hash h_r) - | `Right (h_l, h_r) -> - (Hash h_l, Node (h_r, prev_l, prev_r)) - in - List.fold ~init bottom_to_hash_node_path ~f + let init = replace_self ~f:(Fn.const (Tree.Account account)) bottom_el in + List.foldi ~init bottom_to_hash_node_path ~f:build_tail_f in let hash_to_top_path, hash_node, rest = traverse_through_nodes [] (tree0, List.rev path0) @@ -248,13 +198,35 @@ end = struct | `Account _, Some _ -> failwith "path is longer than a tree's branch" + let add_path (t : t) path account_id account = + let index = + List.foldi path ~init:0 ~f:(fun i acc x -> + match x with `Right _ -> acc + (1 lsl i) | `Left _ -> acc ) + in + let replace_self ~f = function + | `Left h_r -> + (f None, Tree.Hash h_r) + | `Right h_l -> + (Tree.Hash h_l, f 
None) + in + { t with + tree = add_path_impl ~replace_self t.tree path account + ; indexes = (account_id, index) :: t.indexes + } + let add_wide_path_unsafe (t : t) path account_id account = let index = List.foldi path ~init:0 ~f:(fun i acc x -> match x with `Right _ -> acc + (1 lsl i) | `Left _ -> acc ) in + let replace_self ~f = function + | `Left (h_l, h_r) -> + (f (Some h_l), Tree.Hash h_r) + | `Right (h_l, h_r) -> + (Tree.Hash h_l, f (Some h_r)) + in { t with - tree = add_wide_path_unsafe t.tree path account + tree = add_path_impl ~replace_self t.tree path account ; indexes = (account_id, index) :: t.indexes } From d6e6137703373fd835dbedb4af3d188580ff2567 Mon Sep 17 00:00:00 2001 From: georgeee Date: Fri, 1 Dec 2023 17:40:24 +0100 Subject: [PATCH 076/119] Simplify add_path_impl's recursion --- src/lib/sparse_ledger_lib/sparse_ledger.ml | 65 +++++++++------------- 1 file changed, 25 insertions(+), 40 deletions(-) diff --git a/src/lib/sparse_ledger_lib/sparse_ledger.ml b/src/lib/sparse_ledger_lib/sparse_ledger.ml index 68f9a313ac8b..bef5f2739327 100644 --- a/src/lib/sparse_ledger_lib/sparse_ledger.ml +++ b/src/lib/sparse_ledger_lib/sparse_ledger.ml @@ -134,34 +134,8 @@ end = struct let merkle_root { T.tree; _ } = hash tree let add_path_impl ~replace_self tree0 path0 account = - (* Traverses the tree along path, collecting nodes and untraversed sibling hashes - Stops when encounters `Hash` or `Account` node. - - Returns the last visited node (`Hash` or `Account`), remainder of path and - collected node/sibling hashes in bottom-to-top order. 
- *) - let rec traverse_through_nodes acc = function - | Tree.Hash h, rest -> - (acc, `Hash h, rest) - | Account a, rest -> - (acc, `Account a, rest) - | Node (h, l, r), `Left _ :: rest -> - traverse_through_nodes ((h, `Right_preserved r) :: acc) (l, rest) - | Node (h, l, r), `Right _ :: rest -> - traverse_through_nodes ((h, `Left_preserved l) :: acc) (r, rest) - | Node _, [] -> - failwith "path is shorter than a tree's branch" - in - (* Takes a list of collected node/sibling hashes in bottom-to-top order - (returned by the function above) and returns a tree that is reconstructed from - bottom to top, substituting siblings as necessary. *) - let build_to_top bottom_node = - List.fold ~init:bottom_node ~f:(fun node -> function - | h, `Left_preserved l -> - Tree.Node (h, l, node) - | h, `Right_preserved r -> - Tree.Node (h, node, r) ) - in + (* Takes height, left and right children and builds a pair of sibling nodes + one level up *) let build_tail_f height (prev_l, prev_r) = replace_self ~f:(fun mself -> let self = @@ -184,19 +158,30 @@ end = struct let init = replace_self ~f:(Fn.const (Tree.Account account)) bottom_el in List.foldi ~init bottom_to_hash_node_path ~f:build_tail_f in - let hash_to_top_path, hash_node, rest = - traverse_through_nodes [] (tree0, List.rev path0) + (* Traverses the tree along path, collecting nodes and untraversed sibling hashes + Stops when encounters `Hash` or `Account` node. + + Returns the last visited node (`Hash` or `Account`), remainder of path and + collected node/sibling hashes in bottom-to-top order. 
+ *) + let rec traverse_through_nodes = function + | Tree.Account _, _ :: _ -> + failwith "path is longer than a tree's branch" + | Account _, [] | Tree.Hash _, [] -> + Tree.Account account + | Tree.Hash h, fst_el :: rest -> + let tail_l, tail_r = + build_tail (Mina_stdlib.Nonempty_list.init fst_el rest) + in + Tree.Node (h, tail_l, tail_r) + | Node (h, l, r), `Left _ :: rest -> + Tree.Node (h, traverse_through_nodes (l, rest), r) + | Node (h, l, r), `Right _ :: rest -> + Tree.Node (h, l, traverse_through_nodes (r, rest)) + | Node _, [] -> + failwith "path is shorter than a tree's branch" in - match (hash_node, Mina_stdlib.Nonempty_list.of_list_opt rest) with - | `Account _, None -> - tree0 - | `Hash _, None -> - build_to_top (Tree.Account account) hash_to_top_path - | `Hash h, Some hash_to_bottom_path -> - let tail_l, tail_r = build_tail hash_to_bottom_path in - build_to_top (Tree.Node (h, tail_l, tail_r)) hash_to_top_path - | `Account _, Some _ -> - failwith "path is longer than a tree's branch" + traverse_through_nodes (tree0, List.rev path0) let add_path (t : t) path account_id account = let index = From 758955f4c70b0f39d6471f991fae2b4af8e93040 Mon Sep 17 00:00:00 2001 From: Nathan Holland Date: Tue, 21 Nov 2023 11:51:30 -0600 Subject: [PATCH 077/119] Replace tables with maps in merkle masks --- src/lib/merkle_address/merkle_address.ml | 37 +++++----- src/lib/merkle_address/merkle_address.mli | 2 + src/lib/merkle_mask/inputs_intf.ml | 7 +- src/lib/merkle_mask/masking_merkle_tree.ml | 85 +++++++++++----------- src/lib/mina_ledger/ledger.ml | 1 + 5 files changed, 69 insertions(+), 63 deletions(-) diff --git a/src/lib/merkle_address/merkle_address.ml b/src/lib/merkle_address/merkle_address.ml index 641c37bdfe0d..d5030726733a 100644 --- a/src/lib/merkle_address/merkle_address.ml +++ b/src/lib/merkle_address/merkle_address.ml @@ -97,9 +97,9 @@ let height ~ledger_depth path = ledger_depth - depth path let get = get -[%%define_locally -Stable.Latest.(t_of_sexp, 
sexp_of_t, to_yojson, compare, equal)] +[%%define_locally Stable.Latest.(t_of_sexp, sexp_of_t, to_yojson)] +include Comparable.Make_binable (Stable.Latest) include Hashable.Make_binable (Stable.Latest) let of_byte_string = bitstring_of_string @@ -114,13 +114,13 @@ let copy (path : t) : t = (* returns a slice of the original path, so the returned key needs to be copied before mutating the path *) let parent (path : t) = - if bitstring_length path = 0 then + if Int.equal (bitstring_length path) 0 then Or_error.error_string "Address length should be nonzero" else Or_error.return (slice path 0 (bitstring_length path - 1)) let parent_exn = Fn.compose Or_error.ok_exn parent -let is_leaf ~ledger_depth path = bitstring_length path >= ledger_depth +let is_leaf ~ledger_depth path = Int.(bitstring_length path >= ledger_depth) let child ~ledger_depth (path : t) dir : t Or_error.t = if is_leaf ~ledger_depth path then @@ -137,10 +137,10 @@ let to_int (path : t) : int = Sequence.range 0 (depth path) |> Sequence.fold ~init:0 ~f:(fun acc i -> let index = depth path - 1 - i in - acc + ((if get path index <> 0 then 1 else 0) lsl i) ) + acc + ((if Int.(get path index <> 0) then 1 else 0) lsl i) ) let of_int_exn ~ledger_depth index = - if index >= 1 lsl ledger_depth then failwith "Index is too large" + if Int.(index >= 1 lsl ledger_depth) then failwith "Index is too large" else let buf = create_bitstring ledger_depth in ignore @@ -160,7 +160,7 @@ let root () = create_bitstring 0 let sibling (path : t) : t = let path = copy path in let last_bit_index = depth path - 1 in - let last_bit = if get path last_bit_index = 0 then 1 else 0 in + let last_bit = if Int.equal (get path last_bit_index) 0 then 1 else 0 in put path last_bit_index last_bit ; path @@ -169,12 +169,12 @@ let next (path : t) : t Option.t = let path = copy path in let len = depth path in let rec find_rightmost_clear_bit i = - if i < 0 then None + if Int.(i < 0) then None else if is_clear path i then Some i else 
find_rightmost_clear_bit (i - 1) in let rec clear_bits i = - if i >= len then () + if Int.(i >= len) then () else ( clear path i ; clear_bits (i + 1) ) @@ -189,12 +189,12 @@ let prev (path : t) : t Option.t = let path = copy path in let len = depth path in let rec find_rightmost_one_bit i = - if i < 0 then None + if Int.(i < 0) then None else if is_set path i then Some i else find_rightmost_one_bit (i - 1) in let rec set_bits i = - if i >= len then () + if Int.(i >= len) then () else ( set path i ; set_bits (i + 1) ) @@ -208,7 +208,7 @@ let serialize ~ledger_depth path = let path = add_padding path in let path_len = depth path in let required_bits = 8 * byte_count_of_bits ledger_depth in - assert (path_len <= required_bits) ; + assert (Int.(path_len <= required_bits)) ; let required_padding = required_bits - path_len in Bigstring.of_string @@ string_of_bitstring @@ concat [ path; zeroes_bitstring required_padding ] @@ -218,27 +218,28 @@ let is_parent_of parent ~maybe_child = Bitstring.is_prefix maybe_child parent let same_height_ancestors x y = let depth_x = depth x in let depth_y = depth y in - if depth_x < depth_y then (x, slice y 0 depth_x) else (slice x 0 depth_y, y) + if Int.(depth_x < depth_y) then (x, slice y 0 depth_x) + else (slice x 0 depth_y, y) let is_further_right ~than path = let than, path = same_height_ancestors than path in - compare than path < 0 + Int.( < ) (compare than path) 0 module Range = struct type nonrec t = t * t let rec fold_exl (first, last) ~init ~f = let comparison = compare first last in - if comparison > 0 then + if Int.(comparison > 0) then raise (Invalid_argument "first address needs to precede last address") - else if comparison = 0 then init + else if Int.(comparison = 0) then init else fold_exl (next first |> Option.value_exn, last) ~init:(f first init) ~f let fold_incl (first, last) ~init ~f = f last @@ fold_exl (first, last) ~init ~f let fold ?(stop = `Inclusive) (first, last) ~init ~f = - assert (depth first = depth last) ; 
+ assert (Int.(depth first = depth last)) ; match stop with | `Inclusive -> fold_incl (first, last) ~init ~f @@ -262,7 +263,7 @@ module Range = struct | _, `Stop -> None | current_node, `Don't_stop -> - if compare current_node last_node = 0 then + if Int.equal (compare current_node last_node) 0 then Some (current_node, (current_node, `Stop)) else Option.map (next current_node) ~f:(fun next_node -> diff --git a/src/lib/merkle_address/merkle_address.mli b/src/lib/merkle_address/merkle_address.mli index 98d21ad95cf8..af169723da8c 100644 --- a/src/lib/merkle_address/merkle_address.mli +++ b/src/lib/merkle_address/merkle_address.mli @@ -11,6 +11,8 @@ module Stable : sig module Latest : module type of V1 end +include Comparable.S_binable with type t := t + include Hashable.S_binable with type t := t val of_byte_string : string -> t diff --git a/src/lib/merkle_mask/inputs_intf.ml b/src/lib/merkle_mask/inputs_intf.ml index 761b5768ddb0..48647cd4d61e 100644 --- a/src/lib/merkle_mask/inputs_intf.ml +++ b/src/lib/merkle_mask/inputs_intf.ml @@ -20,8 +20,11 @@ module type S = sig module Location : Merkle_ledger.Location_intf.S - module Location_binable : - Core_kernel.Hashable.S_binable with type t := Location.t + module Location_binable : sig + include Core_kernel.Hashable.S_binable with type t := Location.t + + include Core_kernel.Comparable.S_binable with type t := Location.t + end module Base : Base_merkle_tree_intf.S diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index b8f7667951e9..178253838f30 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -40,12 +40,12 @@ module Make (Inputs : Inputs_intf.S) = struct type t = { uuid : Uuid.Stable.V1.t - ; account_tbl : Account.t Location_binable.Table.t - ; token_owners : Account_id.t Token_id.Table.t + ; mutable accounts : Account.t Location_binable.Map.t + ; mutable token_owners : Account_id.t Token_id.Map.t ; mutable 
parent : Parent.t ; detached_parent_signal : Detached_parent_signal.t - ; hash_tbl : Hash.t Addr.Table.t - ; location_tbl : Location.t Account_id.Table.t + ; mutable hashes : Hash.t Addr.Map.t + ; mutable locations : Location.t Account_id.Map.t ; mutable current_location : Location.t option ; depth : int } @@ -57,10 +57,10 @@ module Make (Inputs : Inputs_intf.S) = struct { uuid = Uuid_unix.create () ; parent = Error __LOC__ ; detached_parent_signal = Async.Ivar.create () - ; account_tbl = Location_binable.Table.create () - ; token_owners = Token_id.Table.create () - ; hash_tbl = Addr.Table.create () - ; location_tbl = Account_id.Table.create () + ; accounts = Location_binable.Map.empty + ; token_owners = Token_id.Map.empty + ; hashes = Addr.Map.empty + ; locations = Account_id.Map.empty ; current_location = None ; depth } @@ -133,12 +133,11 @@ module Make (Inputs : Inputs_intf.S) = struct (* don't rely on a particular implementation *) let self_find_hash t address = - assert_is_attached t ; - Addr.Table.find t.hash_tbl address + assert_is_attached t ; Map.find t.hashes address let self_set_hash t address hash = assert_is_attached t ; - Addr.Table.set t.hash_tbl ~key:address ~data:hash + t.hashes <- Map.set t.hashes ~key:address ~data:hash let set_inner_hash_at_addr_exn t address hash = assert_is_attached t ; @@ -148,11 +147,11 @@ module Make (Inputs : Inputs_intf.S) = struct (* don't rely on a particular implementation *) let self_find_location t account_id = assert_is_attached t ; - Account_id.Table.find t.location_tbl account_id + Map.find t.locations account_id let self_set_location t account_id location = assert_is_attached t ; - Account_id.Table.set t.location_tbl ~key:account_id ~data:location ; + t.locations <- Map.set t.locations ~key:account_id ~data:location ; (* if account is at a hitherto-unused location, that becomes the current location *) @@ -166,11 +165,11 @@ module Make (Inputs : Inputs_intf.S) = struct (* don't rely on a particular implementation 
*) let self_find_account t location = assert_is_attached t ; - Location_binable.Table.find t.account_tbl location + Map.find t.accounts location let self_set_account t location account = assert_is_attached t ; - Location_binable.Table.set t.account_tbl ~key:location ~data:account ; + t.accounts <- Map.set t.accounts ~key:location ~data:account ; self_set_location t (Account.identifier account) location (* a read does a lookup in the account_tbl; if that fails, delegate to @@ -424,14 +423,15 @@ module Make (Inputs : Inputs_intf.S) = struct assert_is_attached t ; (* remove account and key from tables *) let account = Option.value_exn (self_find_account t location) in - Location_binable.Table.remove t.account_tbl location ; + t.accounts <- Map.remove t.accounts location ; (* Update token info. *) let account_id = Account.identifier account in - Token_id.Table.remove t.token_owners - (Account_id.derive_token_id ~owner:account_id) ; + t.token_owners <- + Token_id.Map.remove t.token_owners + (Account_id.derive_token_id ~owner:account_id) ; (* TODO : use stack database to save unused location, which can be used when allocating a location *) - Account_id.Table.remove t.location_tbl account_id ; + t.locations <- Map.remove t.locations account_id ; (* reuse location if possible *) Option.iter t.current_location ~f:(fun curr_loc -> if Location.equal location curr_loc then @@ -456,9 +456,10 @@ module Make (Inputs : Inputs_intf.S) = struct self_set_account t location account ; (* Update token info. 
*) let account_id = Account.identifier account in - Token_id.Table.set t.token_owners - ~key:(Account_id.derive_token_id ~owner:account_id) - ~data:account_id + t.token_owners <- + Map.set t.token_owners + ~key:(Account_id.derive_token_id ~owner:account_id) + ~data:account_id (* a write writes only to the mask, parent is not involved need to update both account and hash pieces of the mask *) @@ -541,10 +542,10 @@ module Make (Inputs : Inputs_intf.S) = struct let commit t = assert_is_attached t ; let old_root_hash = merkle_root t in - let account_data = Location_binable.Table.to_alist t.account_tbl in + let account_data = Map.to_alist t.accounts in Base.set_batch (get_parent t) account_data ; - Location_binable.Table.clear t.account_tbl ; - Addr.Table.clear t.hash_tbl ; + t.accounts <- Location_binable.Map.empty ; + t.hashes <- Addr.Map.empty ; Debug_assert.debug_assert (fun () -> [%test_result: Hash.t] ~message: @@ -562,10 +563,10 @@ module Make (Inputs : Inputs_intf.S) = struct { uuid = Uuid_unix.create () ; parent = Ok (get_parent t) ; detached_parent_signal = Async.Ivar.create () - ; account_tbl = Location_binable.Table.copy t.account_tbl - ; token_owners = Token_id.Table.copy t.token_owners - ; location_tbl = Account_id.Table.copy t.location_tbl - ; hash_tbl = Addr.Table.copy t.hash_tbl + ; accounts = t.accounts + ; token_owners = t.token_owners + ; locations = t.locations + ; hashes = t.hashes ; current_location = t.current_location ; depth = t.depth } @@ -627,15 +628,15 @@ module Make (Inputs : Inputs_intf.S) = struct let set_location_batch ~last_location t account_to_location_list = t.current_location <- Some last_location ; Mina_stdlib.Nonempty_list.iter account_to_location_list - ~f:(fun (key, data) -> - Account_id.Table.set t.location_tbl ~key ~data ) + ~f:(fun (key, data) -> t.locations <- Map.set t.locations ~key ~data) let set_raw_account_batch t locations_and_accounts = List.iter locations_and_accounts ~f:(fun (location, account) -> let account_id = 
Account.identifier account in - Token_id.Table.set t.token_owners - ~key:(Account_id.derive_token_id ~owner:account_id) - ~data:account_id ; + t.token_owners <- + Map.set t.token_owners + ~key:(Account_id.derive_token_id ~owner:account_id) + ~data:account_id ; self_set_account t location account ) end) @@ -650,7 +651,7 @@ module Make (Inputs : Inputs_intf.S) = struct let token_owner t tid = assert_is_attached t ; - match Token_id.Table.find t.token_owners tid with + match Map.find t.token_owners tid with | Some id -> Some id | None -> @@ -659,7 +660,7 @@ module Make (Inputs : Inputs_intf.S) = struct let token_owners (t : t) : Account_id.Set.t = assert_is_attached t ; let mask_owners = - Hashtbl.fold t.token_owners ~init:Account_id.Set.empty + Map.fold t.token_owners ~init:Account_id.Set.empty ~f:(fun ~key:_tid ~data:owner acc -> Set.add acc owner) in Set.union mask_owners (Base.token_owners (get_parent t)) @@ -667,7 +668,7 @@ module Make (Inputs : Inputs_intf.S) = struct let tokens t pk = assert_is_attached t ; let mask_tokens = - Account_id.Table.keys t.location_tbl + Map.keys t.locations |> List.filter_map ~f:(fun aid -> if Key.equal pk (Account_id.public_key aid) then Some (Account_id.token_id aid) @@ -798,9 +799,9 @@ module Make (Inputs : Inputs_intf.S) = struct as sometimes this is desired behavior *) let close t = assert_is_attached t ; - Location_binable.Table.clear t.account_tbl ; - Addr.Table.clear t.hash_tbl ; - Account_id.Table.clear t.location_tbl ; + t.accounts <- Location_binable.Map.empty ; + t.hashes <- Addr.Map.empty ; + t.locations <- Account_id.Map.empty ; Async.Ivar.fill_if_empty t.detached_parent_signal () let index_of_account_exn t key = @@ -844,9 +845,7 @@ module Make (Inputs : Inputs_intf.S) = struct let foldi_with_ignored_accounts t ignored_accounts ~init ~f = assert_is_attached t ; - let locations_and_accounts = - Location_binable.Table.to_alist t.account_tbl - in + let locations_and_accounts = Map.to_alist t.accounts in (* parent should 
ignore accounts in this mask *) let mask_accounts = List.map locations_and_accounts ~f:(fun (_loc, acct) -> diff --git a/src/lib/mina_ledger/ledger.ml b/src/lib/mina_ledger/ledger.ml index 87fb51b7ae1b..0272b13c8ca5 100644 --- a/src/lib/mina_ledger/ledger.ml +++ b/src/lib/mina_ledger/ledger.ml @@ -22,6 +22,7 @@ module Ledger_inner = struct | Hash of Location_at_depth.Addr.t [@@deriving hash, sexp, compare] + include Comparable.Make_binable (Arg) include Hashable.Make_binable (Arg) [@@deriving sexp, compare, hash, yojson] end From 175c24ab3cdc3e25f7b11e41423ef5df9cdb6992 Mon Sep 17 00:00:00 2001 From: Nathan Holland Date: Wed, 22 Nov 2023 05:16:31 -0600 Subject: [PATCH 078/119] Fix merkle ledger tests --- src/lib/merkle_ledger_tests/test_mask.ml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib/merkle_ledger_tests/test_mask.ml b/src/lib/merkle_ledger_tests/test_mask.ml index ce361f9d5266..f273f52d798e 100644 --- a/src/lib/merkle_ledger_tests/test_mask.ml +++ b/src/lib/merkle_ledger_tests/test_mask.ml @@ -752,8 +752,9 @@ module Make_maskable_and_mask_with_depth (Depth : Depth_S) = struct | Generic of Merkle_ledger.Location.Bigstring.t | Account of Location.Addr.t | Hash of Location.Addr.t - [@@deriving hash, sexp, compare] + [@@deriving hash, sexp] + include Comparable.Make_binable (Arg) include Hashable.Make_binable (Arg) [@@deriving sexp, compare, hash, yojson] end From 8371f64d71792b9bfec797af0cf542c690b3c8a5 Mon Sep 17 00:00:00 2001 From: Nathan Holland Date: Wed, 22 Nov 2023 10:07:45 -0600 Subject: [PATCH 079/119] Use map refs instead of mutable map fields --- src/lib/merkle_mask/masking_merkle_tree.ml | 76 +++++++++++----------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 178253838f30..99f927da132d 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -40,12 +40,12 @@ 
module Make (Inputs : Inputs_intf.S) = struct type t = { uuid : Uuid.Stable.V1.t - ; mutable accounts : Account.t Location_binable.Map.t - ; mutable token_owners : Account_id.t Token_id.Map.t + ; accounts : Account.t Location_binable.Map.t ref + ; token_owners : Account_id.t Token_id.Map.t ref ; mutable parent : Parent.t ; detached_parent_signal : Detached_parent_signal.t - ; mutable hashes : Hash.t Addr.Map.t - ; mutable locations : Location.t Account_id.Map.t + ; hashes : Hash.t Addr.Map.t ref + ; locations : Location.t Account_id.Map.t ref ; mutable current_location : Location.t option ; depth : int } @@ -57,10 +57,10 @@ module Make (Inputs : Inputs_intf.S) = struct { uuid = Uuid_unix.create () ; parent = Error __LOC__ ; detached_parent_signal = Async.Ivar.create () - ; accounts = Location_binable.Map.empty - ; token_owners = Token_id.Map.empty - ; hashes = Addr.Map.empty - ; locations = Account_id.Map.empty + ; accounts = ref Location_binable.Map.empty + ; token_owners = ref Token_id.Map.empty + ; hashes = ref Addr.Map.empty + ; locations = ref Account_id.Map.empty ; current_location = None ; depth } @@ -133,11 +133,12 @@ module Make (Inputs : Inputs_intf.S) = struct (* don't rely on a particular implementation *) let self_find_hash t address = - assert_is_attached t ; Map.find t.hashes address + assert_is_attached t ; + Map.find !(t.hashes) address let self_set_hash t address hash = assert_is_attached t ; - t.hashes <- Map.set t.hashes ~key:address ~data:hash + t.hashes := Map.set !(t.hashes) ~key:address ~data:hash let set_inner_hash_at_addr_exn t address hash = assert_is_attached t ; @@ -147,11 +148,11 @@ module Make (Inputs : Inputs_intf.S) = struct (* don't rely on a particular implementation *) let self_find_location t account_id = assert_is_attached t ; - Map.find t.locations account_id + Map.find !(t.locations) account_id let self_set_location t account_id location = assert_is_attached t ; - t.locations <- Map.set t.locations ~key:account_id 
~data:location ; + t.locations := Map.set !(t.locations) ~key:account_id ~data:location ; (* if account is at a hitherto-unused location, that becomes the current location *) @@ -165,11 +166,11 @@ module Make (Inputs : Inputs_intf.S) = struct (* don't rely on a particular implementation *) let self_find_account t location = assert_is_attached t ; - Map.find t.accounts location + Map.find !(t.accounts) location let self_set_account t location account = assert_is_attached t ; - t.accounts <- Map.set t.accounts ~key:location ~data:account ; + t.accounts := Map.set !(t.accounts) ~key:location ~data:account ; self_set_location t (Account.identifier account) location (* a read does a lookup in the account_tbl; if that fails, delegate to @@ -423,15 +424,15 @@ module Make (Inputs : Inputs_intf.S) = struct assert_is_attached t ; (* remove account and key from tables *) let account = Option.value_exn (self_find_account t location) in - t.accounts <- Map.remove t.accounts location ; + t.accounts := Map.remove !(t.accounts) location ; (* Update token info. *) let account_id = Account.identifier account in - t.token_owners <- - Token_id.Map.remove t.token_owners + t.token_owners := + Token_id.Map.remove !(t.token_owners) (Account_id.derive_token_id ~owner:account_id) ; (* TODO : use stack database to save unused location, which can be used when allocating a location *) - t.locations <- Map.remove t.locations account_id ; + t.locations := Map.remove !(t.locations) account_id ; (* reuse location if possible *) Option.iter t.current_location ~f:(fun curr_loc -> if Location.equal location curr_loc then @@ -456,8 +457,8 @@ module Make (Inputs : Inputs_intf.S) = struct self_set_account t location account ; (* Update token info. 
*) let account_id = Account.identifier account in - t.token_owners <- - Map.set t.token_owners + t.token_owners := + Map.set !(t.token_owners) ~key:(Account_id.derive_token_id ~owner:account_id) ~data:account_id @@ -542,10 +543,10 @@ module Make (Inputs : Inputs_intf.S) = struct let commit t = assert_is_attached t ; let old_root_hash = merkle_root t in - let account_data = Map.to_alist t.accounts in + let account_data = Map.to_alist !(t.accounts) in Base.set_batch (get_parent t) account_data ; - t.accounts <- Location_binable.Map.empty ; - t.hashes <- Addr.Map.empty ; + t.accounts := Location_binable.Map.empty ; + t.hashes := Addr.Map.empty ; Debug_assert.debug_assert (fun () -> [%test_result: Hash.t] ~message: @@ -563,10 +564,10 @@ module Make (Inputs : Inputs_intf.S) = struct { uuid = Uuid_unix.create () ; parent = Ok (get_parent t) ; detached_parent_signal = Async.Ivar.create () - ; accounts = t.accounts - ; token_owners = t.token_owners - ; locations = t.locations - ; hashes = t.hashes + ; accounts = ref !(t.accounts) + ; token_owners = ref !(t.token_owners) + ; locations = ref !(t.locations) + ; hashes = ref !(t.hashes) ; current_location = t.current_location ; depth = t.depth } @@ -628,13 +629,14 @@ module Make (Inputs : Inputs_intf.S) = struct let set_location_batch ~last_location t account_to_location_list = t.current_location <- Some last_location ; Mina_stdlib.Nonempty_list.iter account_to_location_list - ~f:(fun (key, data) -> t.locations <- Map.set t.locations ~key ~data) + ~f:(fun (key, data) -> + t.locations := Map.set !(t.locations) ~key ~data ) let set_raw_account_batch t locations_and_accounts = List.iter locations_and_accounts ~f:(fun (location, account) -> let account_id = Account.identifier account in - t.token_owners <- - Map.set t.token_owners + t.token_owners := + Map.set !(t.token_owners) ~key:(Account_id.derive_token_id ~owner:account_id) ~data:account_id ; self_set_account t location account ) @@ -651,7 +653,7 @@ module Make (Inputs : 
Inputs_intf.S) = struct let token_owner t tid = assert_is_attached t ; - match Map.find t.token_owners tid with + match Map.find !(t.token_owners) tid with | Some id -> Some id | None -> @@ -660,7 +662,7 @@ module Make (Inputs : Inputs_intf.S) = struct let token_owners (t : t) : Account_id.Set.t = assert_is_attached t ; let mask_owners = - Map.fold t.token_owners ~init:Account_id.Set.empty + Map.fold !(t.token_owners) ~init:Account_id.Set.empty ~f:(fun ~key:_tid ~data:owner acc -> Set.add acc owner) in Set.union mask_owners (Base.token_owners (get_parent t)) @@ -668,7 +670,7 @@ module Make (Inputs : Inputs_intf.S) = struct let tokens t pk = assert_is_attached t ; let mask_tokens = - Map.keys t.locations + Map.keys !(t.locations) |> List.filter_map ~f:(fun aid -> if Key.equal pk (Account_id.public_key aid) then Some (Account_id.token_id aid) @@ -799,9 +801,9 @@ module Make (Inputs : Inputs_intf.S) = struct as sometimes this is desired behavior *) let close t = assert_is_attached t ; - t.accounts <- Location_binable.Map.empty ; - t.hashes <- Addr.Map.empty ; - t.locations <- Account_id.Map.empty ; + t.accounts := Location_binable.Map.empty ; + t.hashes := Addr.Map.empty ; + t.locations := Account_id.Map.empty ; Async.Ivar.fill_if_empty t.detached_parent_signal () let index_of_account_exn t key = @@ -845,7 +847,7 @@ module Make (Inputs : Inputs_intf.S) = struct let foldi_with_ignored_accounts t ignored_accounts ~init ~f = assert_is_attached t ; - let locations_and_accounts = Map.to_alist t.accounts in + let locations_and_accounts = Map.to_alist !(t.accounts) in (* parent should ignore accounts in this mask *) let mask_accounts = List.map locations_and_accounts ~f:(fun (_loc, acct) -> From 4403440f55d56cdb622f55a3ca4e1c032f608b6b Mon Sep 17 00:00:00 2001 From: georgeee Date: Mon, 27 Nov 2023 20:25:24 +0100 Subject: [PATCH 080/119] Fixup after rebase --- src/lib/merkle_mask/masking_merkle_tree.ml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff 
--git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 99f927da132d..b35ba1d6ad7c 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -237,7 +237,7 @@ module Make (Inputs : Inputs_intf.S) = struct Empty_hashes.extensible_cache (module Hash) ~init_hash:Hash.empty_account let self_path_get_hash ~hashes ~current_location height address = - match Hashtbl.find hashes address with + match Map.find hashes address with | Some hash -> Some hash | None -> @@ -292,7 +292,7 @@ module Make (Inputs : Inputs_intf.S) = struct let fixup_merkle_path ~hashes ~address:init = let f address = (* first element in the path contains hash at sibling of address *) - let sibling_mask_hash = Hashtbl.find hashes (Addr.sibling address) in + let sibling_mask_hash = Map.find hashes (Addr.sibling address) in let parent_addr = Addr.parent_exn address in let open Option in function @@ -308,8 +308,8 @@ module Make (Inputs : Inputs_intf.S) = struct let fixup_wide_merkle_path ~hashes ~address:init = let f address = (* element in the path contains hash at sibling of address *) - let sibling_mask_hash = Hashtbl.find hashes (Addr.sibling address) in - let self_mask_hash = Hashtbl.find hashes address in + let sibling_mask_hash = Map.find hashes (Addr.sibling address) in + let self_mask_hash = Map.find hashes address in let parent_addr = Addr.parent_exn address in let open Option in function @@ -332,7 +332,7 @@ module Make (Inputs : Inputs_intf.S) = struct let merkle_path_at_addr_exn t address = assert_is_attached t ; match - self_merkle_path ~depth:t.depth ~hashes:t.hash_tbl + self_merkle_path ~depth:t.depth ~hashes:!(t.hashes) ~current_location:t.current_location address with | Some path -> @@ -341,7 +341,7 @@ module Make (Inputs : Inputs_intf.S) = struct let parent_merkle_path = Base.merkle_path_at_addr_exn (get_parent t) address in - fixup_merkle_path ~hashes:t.hash_tbl parent_merkle_path ~address + 
fixup_merkle_path ~hashes:!(t.hashes) parent_merkle_path ~address let merkle_path_at_index_exn t index = merkle_path_at_addr_exn t (Addr.of_int_exn ~ledger_depth:t.depth index) @@ -355,7 +355,7 @@ module Make (Inputs : Inputs_intf.S) = struct let self_paths = List.map locations ~f:(fun location -> let address = Location.to_path_exn location in - self_lookup ~hashes:t.hash_tbl ~current_location:t.current_location + self_lookup ~hashes:!(t.hashes) ~current_location:t.current_location ~depth:t.depth address |> Option.value_map ~default:(Either.Second (location, address)) @@ -376,7 +376,7 @@ module Make (Inputs : Inputs_intf.S) = struct (parent_paths, path) | Either.Second (_, address) -> let path = - fixup_path ~hashes:t.hash_tbl ~address (List.hd_exn parent_paths) + fixup_path ~hashes:!(t.hashes) ~address (List.hd_exn parent_paths) in (List.tl_exn parent_paths, path) in From c3fcccfee8a938d3be5f90c06db6e42df3f64db3 Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 22 Nov 2023 15:11:22 +0100 Subject: [PATCH 081/119] Small refactoring in Masking_merkle_tree --- src/lib/merkle_mask/masking_merkle_tree.ml | 109 +++++++++++---------- 1 file changed, 57 insertions(+), 52 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index b35ba1d6ad7c..dc21cd839ca1 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -38,16 +38,24 @@ module Make (Inputs : Inputs_intf.S) = struct let t_of_sexp (_ : Sexp.t) : t = Async.Ivar.create () end + type maps_t = + { mutable accounts : Account.t Location_binable.Map.t + ; mutable token_owners : Account_id.t Token_id.Map.t + ; mutable hashes : Hash.t Addr.Map.t + ; mutable locations : Location.t Account_id.Map.t + } + [@@deriving sexp] + + let maps_copy { accounts; token_owners; hashes; locations } = + { accounts; token_owners; hashes; locations } + type t = { uuid : Uuid.Stable.V1.t - ; accounts : Account.t Location_binable.Map.t 
ref - ; token_owners : Account_id.t Token_id.Map.t ref ; mutable parent : Parent.t ; detached_parent_signal : Detached_parent_signal.t - ; hashes : Hash.t Addr.Map.t ref - ; locations : Location.t Account_id.Map.t ref ; mutable current_location : Location.t option ; depth : int + ; maps : maps_t } [@@deriving sexp] @@ -57,12 +65,14 @@ module Make (Inputs : Inputs_intf.S) = struct { uuid = Uuid_unix.create () ; parent = Error __LOC__ ; detached_parent_signal = Async.Ivar.create () - ; accounts = ref Location_binable.Map.empty - ; token_owners = ref Token_id.Map.empty - ; hashes = ref Addr.Map.empty - ; locations = ref Account_id.Map.empty ; current_location = None ; depth + ; maps = + { accounts = Location_binable.Map.empty + ; token_owners = Token_id.Map.empty + ; hashes = Addr.Map.empty + ; locations = Account_id.Map.empty + } } let get_uuid { uuid; _ } = uuid @@ -132,13 +142,10 @@ module Make (Inputs : Inputs_intf.S) = struct let depth t = assert_is_attached t ; t.depth (* don't rely on a particular implementation *) - let self_find_hash t address = - assert_is_attached t ; - Map.find !(t.hashes) address + let self_find_hash t address = Map.find t.maps.hashes address let self_set_hash t address hash = - assert_is_attached t ; - t.hashes := Map.set !(t.hashes) ~key:address ~data:hash + t.maps.hashes <- Map.set t.maps.hashes ~key:address ~data:hash let set_inner_hash_at_addr_exn t address hash = assert_is_attached t ; @@ -146,13 +153,11 @@ module Make (Inputs : Inputs_intf.S) = struct self_set_hash t address hash (* don't rely on a particular implementation *) - let self_find_location t account_id = - assert_is_attached t ; - Map.find !(t.locations) account_id + let self_find_location t account_id = Map.find t.maps.locations account_id let self_set_location t account_id location = - assert_is_attached t ; - t.locations := Map.set !(t.locations) ~key:account_id ~data:location ; + t.maps.locations <- + Map.set t.maps.locations ~key:account_id ~data:location ; (* if 
account is at a hitherto-unused location, that becomes the current location *) @@ -164,13 +169,10 @@ module Make (Inputs : Inputs_intf.S) = struct t.current_location <- Some location (* don't rely on a particular implementation *) - let self_find_account t location = - assert_is_attached t ; - Map.find !(t.accounts) location + let self_find_account t location = Map.find t.maps.accounts location let self_set_account t location account = - assert_is_attached t ; - t.accounts := Map.set !(t.accounts) ~key:location ~data:account ; + t.maps.accounts <- Map.set t.maps.accounts ~key:location ~data:account ; self_set_location t (Account.identifier account) location (* a read does a lookup in the account_tbl; if that fails, delegate to @@ -332,7 +334,7 @@ module Make (Inputs : Inputs_intf.S) = struct let merkle_path_at_addr_exn t address = assert_is_attached t ; match - self_merkle_path ~depth:t.depth ~hashes:!(t.hashes) + self_merkle_path ~depth:t.depth ~hashes:t.maps.hashes ~current_location:t.current_location address with | Some path -> @@ -341,7 +343,7 @@ module Make (Inputs : Inputs_intf.S) = struct let parent_merkle_path = Base.merkle_path_at_addr_exn (get_parent t) address in - fixup_merkle_path ~hashes:!(t.hashes) parent_merkle_path ~address + fixup_merkle_path ~hashes:t.maps.hashes parent_merkle_path ~address let merkle_path_at_index_exn t index = merkle_path_at_addr_exn t (Addr.of_int_exn ~ledger_depth:t.depth index) @@ -355,8 +357,8 @@ module Make (Inputs : Inputs_intf.S) = struct let self_paths = List.map locations ~f:(fun location -> let address = Location.to_path_exn location in - self_lookup ~hashes:!(t.hashes) ~current_location:t.current_location - ~depth:t.depth address + self_lookup ~hashes:t.maps.hashes + ~current_location:t.current_location ~depth:t.depth address |> Option.value_map ~default:(Either.Second (location, address)) ~f:Either.first ) @@ -376,7 +378,8 @@ module Make (Inputs : Inputs_intf.S) = struct (parent_paths, path) | Either.Second (_, 
address) -> let path = - fixup_path ~hashes:!(t.hashes) ~address (List.hd_exn parent_paths) + fixup_path ~hashes:t.maps.hashes ~address + (List.hd_exn parent_paths) in (List.tl_exn parent_paths, path) in @@ -424,15 +427,15 @@ module Make (Inputs : Inputs_intf.S) = struct assert_is_attached t ; (* remove account and key from tables *) let account = Option.value_exn (self_find_account t location) in - t.accounts := Map.remove !(t.accounts) location ; + t.maps.accounts <- Map.remove t.maps.accounts location ; (* Update token info. *) let account_id = Account.identifier account in - t.token_owners := - Token_id.Map.remove !(t.token_owners) + t.maps.token_owners <- + Token_id.Map.remove t.maps.token_owners (Account_id.derive_token_id ~owner:account_id) ; (* TODO : use stack database to save unused location, which can be used when allocating a location *) - t.locations := Map.remove !(t.locations) account_id ; + t.maps.locations <- Map.remove t.maps.locations account_id ; (* reuse location if possible *) Option.iter t.current_location ~f:(fun curr_loc -> if Location.equal location curr_loc then @@ -457,8 +460,8 @@ module Make (Inputs : Inputs_intf.S) = struct self_set_account t location account ; (* Update token info. 
*) let account_id = Account.identifier account in - t.token_owners := - Map.set !(t.token_owners) + t.maps.token_owners <- + Map.set t.maps.token_owners ~key:(Account_id.derive_token_id ~owner:account_id) ~data:account_id @@ -543,10 +546,10 @@ module Make (Inputs : Inputs_intf.S) = struct let commit t = assert_is_attached t ; let old_root_hash = merkle_root t in - let account_data = Map.to_alist !(t.accounts) in + let account_data = Map.to_alist t.maps.accounts in Base.set_batch (get_parent t) account_data ; - t.accounts := Location_binable.Map.empty ; - t.hashes := Addr.Map.empty ; + t.maps.accounts <- Location_binable.Map.empty ; + t.maps.hashes <- Addr.Map.empty ; Debug_assert.debug_assert (fun () -> [%test_result: Hash.t] ~message: @@ -564,12 +567,9 @@ module Make (Inputs : Inputs_intf.S) = struct { uuid = Uuid_unix.create () ; parent = Ok (get_parent t) ; detached_parent_signal = Async.Ivar.create () - ; accounts = ref !(t.accounts) - ; token_owners = ref !(t.token_owners) - ; locations = ref !(t.locations) - ; hashes = ref !(t.hashes) ; current_location = t.current_location ; depth = t.depth + ; maps = maps_copy t.maps } let last_filled t = @@ -623,6 +623,7 @@ module Make (Inputs : Inputs_intf.S) = struct Option.value_exn (get_hash t (Location.to_path_exn location)) let set_raw_hash_batch t locations_and_hashes = + assert_is_attached t ; List.iter locations_and_hashes ~f:(fun (location, hash) -> self_set_hash t (Location.to_path_exn location) hash ) @@ -630,13 +631,14 @@ module Make (Inputs : Inputs_intf.S) = struct t.current_location <- Some last_location ; Mina_stdlib.Nonempty_list.iter account_to_location_list ~f:(fun (key, data) -> - t.locations := Map.set !(t.locations) ~key ~data ) + t.maps.locations <- Map.set t.maps.locations ~key ~data ) let set_raw_account_batch t locations_and_accounts = + assert_is_attached t ; List.iter locations_and_accounts ~f:(fun (location, account) -> let account_id = Account.identifier account in - t.token_owners := - 
Map.set !(t.token_owners) + t.maps.token_owners <- + Map.set t.maps.token_owners ~key:(Account_id.derive_token_id ~owner:account_id) ~data:account_id ; self_set_account t location account ) @@ -653,7 +655,7 @@ module Make (Inputs : Inputs_intf.S) = struct let token_owner t tid = assert_is_attached t ; - match Map.find !(t.token_owners) tid with + match Map.find t.maps.token_owners tid with | Some id -> Some id | None -> @@ -662,7 +664,7 @@ module Make (Inputs : Inputs_intf.S) = struct let token_owners (t : t) : Account_id.Set.t = assert_is_attached t ; let mask_owners = - Map.fold !(t.token_owners) ~init:Account_id.Set.empty + Map.fold t.maps.token_owners ~init:Account_id.Set.empty ~f:(fun ~key:_tid ~data:owner acc -> Set.add acc owner) in Set.union mask_owners (Base.token_owners (get_parent t)) @@ -670,7 +672,7 @@ module Make (Inputs : Inputs_intf.S) = struct let tokens t pk = assert_is_attached t ; let mask_tokens = - Map.keys !(t.locations) + Map.keys t.maps.locations |> List.filter_map ~f:(fun aid -> if Key.equal pk (Account_id.public_key aid) then Some (Account_id.token_id aid) @@ -801,9 +803,9 @@ module Make (Inputs : Inputs_intf.S) = struct as sometimes this is desired behavior *) let close t = assert_is_attached t ; - t.accounts := Location_binable.Map.empty ; - t.hashes := Addr.Map.empty ; - t.locations := Account_id.Map.empty ; + t.maps.accounts <- Location_binable.Map.empty ; + t.maps.hashes <- Addr.Map.empty ; + t.maps.locations <- Account_id.Map.empty ; Async.Ivar.fill_if_empty t.detached_parent_signal () let index_of_account_exn t key = @@ -847,7 +849,7 @@ module Make (Inputs : Inputs_intf.S) = struct let foldi_with_ignored_accounts t ignored_accounts ~init ~f = assert_is_attached t ; - let locations_and_accounts = Map.to_alist !(t.accounts) in + let locations_and_accounts = Map.to_alist t.maps.accounts in (* parent should ignore accounts in this mask *) let mask_accounts = List.map locations_and_accounts ~f:(fun (_loc, acct) -> @@ -891,9 +893,12 @@ 
module Make (Inputs : Inputs_intf.S) = struct module For_testing = struct let location_in_mask t location = + assert_is_attached t ; Option.is_some (self_find_account t location) - let address_in_mask t addr = Option.is_some (self_find_hash t addr) + let address_in_mask t addr = + assert_is_attached t ; + Option.is_some (self_find_hash t addr) let current_location t = t.current_location end From cb22e039fcf728fd90d9584a18f12334ad12f471 Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 22 Nov 2023 17:22:32 +0100 Subject: [PATCH 082/119] Remove remove_accounts_exn from masking tree --- src/lib/merkle_ledger/any_ledger.ml | 2 - src/lib/merkle_ledger/base_ledger_intf.ml | 2 - src/lib/merkle_ledger/database.ml | 41 -------- src/lib/merkle_ledger/null_ledger.ml | 4 - src/lib/merkle_ledger_tests/test_database.ml | 22 ---- src/lib/merkle_ledger_tests/test_mask.ml | 101 ------------------- src/lib/merkle_mask/masking_merkle_tree.ml | 32 ------ src/lib/mina_base/ledger_intf.ml | 2 - src/lib/mina_base/sparse_ledger_base.ml | 3 - 9 files changed, 209 deletions(-) diff --git a/src/lib/merkle_ledger/any_ledger.ml b/src/lib/merkle_ledger/any_ledger.ml index 0541f47acbd4..2fd41f8573ec 100644 --- a/src/lib/merkle_ledger/any_ledger.ml +++ b/src/lib/merkle_ledger/any_ledger.ml @@ -113,8 +113,6 @@ module Make_base (Inputs : Inputs_intf) : module Addr = Location.Addr - let remove_accounts_exn (T ((module Base), t)) = Base.remove_accounts_exn t - let merkle_path_at_index_exn (T ((module Base), t)) = Base.merkle_path_at_index_exn t diff --git a/src/lib/merkle_ledger/base_ledger_intf.ml b/src/lib/merkle_ledger/base_ledger_intf.ml index f85caa808b50..74c13db4376a 100644 --- a/src/lib/merkle_ledger/base_ledger_intf.ml +++ b/src/lib/merkle_ledger/base_ledger_intf.ml @@ -143,8 +143,6 @@ module type S = sig val get_hash_batch_exn : t -> Location.t list -> hash list - val remove_accounts_exn : t -> account_id list -> unit - (** Triggers when the ledger has been detached and should no longer be 
accessed. *) diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index ec1e2c1e8568..7763f03835c0 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -30,8 +30,6 @@ module Make (Inputs : Inputs_intf) : module Db_error = struct type t = Account_location_not_found | Out_of_leaves | Malformed_database [@@deriving sexp] - - exception Db_exception of t end module Path = Merkle_path.Make (Hash) @@ -245,8 +243,6 @@ module Make (Inputs : Inputs_intf) : |> get_generic_batch mdb |> List.map ~f:(Option.bind ~f:parse_location) - let delete mdb key = delete_raw mdb (build_location key) - let set mdb key location = set_raw mdb (build_location key) (Location.serialize ~ledger_depth:mdb.depth location) @@ -361,9 +357,6 @@ module Make (Inputs : Inputs_intf) : Account_id.Stable.Latest.bin_size_t Account_id.Stable.Latest.bin_write_t account_id - let remove (mdb : t) (token_id : Token_id.t) : unit = - delete_raw mdb (build_location token_id) - let all_owners (t : t) : (Token_id.t * Account_id.t) Sequence.t = let deduped_tokens = (* First get the sequence of unique tokens *) @@ -439,18 +432,10 @@ module Make (Inputs : Inputs_intf) : most accounts are not going to be managers. *) Owner.set mdb (Account_id.derive_token_id ~owner:aid) aid - let remove mdb pk tid = update mdb pk ~f:(fun tids -> Set.remove tids tid) - let _remove_several mdb pk rem_tids = update mdb pk ~f:(fun tids -> Set.diff tids (Token_id.Set.of_list rem_tids) ) - let remove_account (mdb : t) (aid : Account_id.t) : unit = - let token = Account_id.token_id aid in - let key = Account_id.public_key aid in - remove mdb key token ; - Owner.remove mdb (Account_id.derive_token_id ~owner:aid) - (** Generate a batch of database changes to add the given tokens. 
*) let add_batch_create mdb pks_to_tokens = let pks_to_all_tokens = @@ -661,32 +646,6 @@ module Make (Inputs : Inputs_intf) : let merkle_root mdb = get_hash mdb Location.root_hash - let remove_accounts_exn t keys = - let locations = - (* if we don't have a location for all keys, raise an exception *) - let rec loop keys accum = - match keys with - | [] -> - accum (* no need to reverse *) - | key :: rest -> ( - match Account_location.get t key with - | Ok loc -> - loop rest (loc :: accum) - | Error err -> - raise (Db_error.Db_exception err) ) - in - loop keys [] - in - (* N.B.: we're not using stack database here to make available newly-freed - locations *) - List.iter keys ~f:(Account_location.delete t) ; - List.iter keys ~f:(Tokens.remove_account t) ; - List.iter locations ~f:(fun loc -> delete_raw t loc) ; - (* recalculate hashes for each removed account *) - List.iter locations ~f:(fun loc -> - let hash_loc = Location.Hash (Location.to_path_exn loc) in - set_hash t hash_loc Hash.empty_account ) - let merkle_path mdb location = let location = if Location.is_account location then diff --git a/src/lib/merkle_ledger/null_ledger.ml b/src/lib/merkle_ledger/null_ledger.ml index 644dbfc9a5cb..8939ee5262f9 100644 --- a/src/lib/merkle_ledger/null_ledger.ml +++ b/src/lib/merkle_ledger/null_ledger.ml @@ -39,10 +39,6 @@ end = struct let create ~depth () = { uuid = Uuid_unix.create (); depth } - let remove_accounts_exn _t keys = - if List.is_empty keys then () - else failwith "remove_accounts_exn: null ledgers cannot be mutated" - let empty_hash_at_height = Empty_hashes.extensible_cache (module Hash) ~init_hash:Hash.empty_account diff --git a/src/lib/merkle_ledger_tests/test_database.ml b/src/lib/merkle_ledger_tests/test_database.ml index a5bd751d8666..70c21a828d3f 100644 --- a/src/lib/merkle_ledger_tests/test_database.ml +++ b/src/lib/merkle_ledger_tests/test_database.ml @@ -430,28 +430,6 @@ let%test_module "test functor on in memory databases" = Stdlib.List.compare_lengths 
accounts retrieved_accounts = 0 ) ; assert (List.equal Account.equal accounts retrieved_accounts) ) - let%test_unit "removing accounts restores Merkle root" = - Test.with_instance (fun mdb -> - let num_accounts = 5 in - let account_ids = Account_id.gen_accounts num_accounts in - let balances = - Quickcheck.random_value - (Quickcheck.Generator.list_with_length num_accounts Balance.gen) - in - let accounts = - List.map2_exn account_ids balances ~f:Account.create - in - let merkle_root0 = MT.merkle_root mdb in - List.iter accounts ~f:(fun account -> - ignore @@ create_new_account_exn mdb account ) ; - let merkle_root1 = MT.merkle_root mdb in - (* adding accounts should change the Merkle root *) - assert (not (Hash.equal merkle_root0 merkle_root1)) ; - MT.remove_accounts_exn mdb account_ids ; - (* should see original Merkle root after removing the accounts *) - let merkle_root2 = MT.merkle_root mdb in - assert (Hash.equal merkle_root2 merkle_root0) ) - let%test_unit "fold over account balances" = Test.with_instance (fun mdb -> let num_accounts = 5 in diff --git a/src/lib/merkle_ledger_tests/test_mask.ml b/src/lib/merkle_ledger_tests/test_mask.ml index f273f52d798e..4d59d339c50e 100644 --- a/src/lib/merkle_ledger_tests/test_mask.ml +++ b/src/lib/merkle_ledger_tests/test_mask.ml @@ -420,82 +420,6 @@ module Make (Test : Test_intf) = struct Stdlib.List.compare_lengths base_accounts retrieved_accounts = 0 ) ; assert (List.equal Account.equal expected_accounts retrieved_accounts) ) - let%test_unit "removing accounts from mask restores Merkle root" = - Test.with_instances (fun maskable mask -> - let attached_mask = Maskable.register_mask maskable mask in - let num_accounts = 5 in - let account_ids = Account_id.gen_accounts num_accounts in - let balances = - Quickcheck.random_value - (Quickcheck.Generator.list_with_length num_accounts Balance.gen) - in - let accounts = List.map2_exn account_ids balances ~f:Account.create in - let merkle_root0 = Mask.Attached.merkle_root 
attached_mask in - List.iter accounts ~f:(fun account -> - ignore @@ create_new_account_exn attached_mask account ) ; - let merkle_root1 = Mask.Attached.merkle_root attached_mask in - (* adding accounts should change the Merkle root *) - assert (not (Hash.equal merkle_root0 merkle_root1)) ; - Mask.Attached.remove_accounts_exn attached_mask account_ids ; - (* should see original Merkle root after removing the accounts *) - let merkle_root2 = Mask.Attached.merkle_root attached_mask in - assert (Hash.equal merkle_root2 merkle_root0) ) - - let%test_unit "removing accounts from parent restores Merkle root" = - Test.with_instances (fun maskable mask -> - let attached_mask = Maskable.register_mask maskable mask in - let num_accounts = 5 in - let account_ids = Account_id.gen_accounts num_accounts in - let balances = - Quickcheck.random_value - (Quickcheck.Generator.list_with_length num_accounts Balance.gen) - in - let accounts = List.map2_exn account_ids balances ~f:Account.create in - let merkle_root0 = Mask.Attached.merkle_root attached_mask in - (* add accounts to parent *) - List.iter accounts ~f:(fun account -> - ignore @@ parent_create_new_account_exn maskable account ) ; - (* observe Merkle root in mask *) - let merkle_root1 = Mask.Attached.merkle_root attached_mask in - (* adding accounts should change the Merkle root *) - assert (not (Hash.equal merkle_root0 merkle_root1)) ; - Mask.Attached.remove_accounts_exn attached_mask account_ids ; - (* should see original Merkle root after removing the accounts *) - let merkle_root2 = Mask.Attached.merkle_root attached_mask in - assert (Hash.equal merkle_root2 merkle_root0) ) - - let%test_unit "removing accounts from parent and mask restores Merkle root" = - Test.with_instances (fun maskable mask -> - let attached_mask = Maskable.register_mask maskable mask in - let num_accounts_parent = 5 in - let num_accounts_mask = 5 in - let num_accounts = num_accounts_parent + num_accounts_mask in - let account_ids = 
Account_id.gen_accounts num_accounts in - let balances = - Quickcheck.random_value - (Quickcheck.Generator.list_with_length num_accounts Balance.gen) - in - let accounts = List.map2_exn account_ids balances ~f:Account.create in - let parent_accounts, mask_accounts = - List.split_n accounts num_accounts_parent - in - let merkle_root0 = Mask.Attached.merkle_root attached_mask in - (* add accounts to parent *) - List.iter parent_accounts ~f:(fun account -> - ignore @@ parent_create_new_account_exn maskable account ) ; - (* add accounts to mask *) - List.iter mask_accounts ~f:(fun account -> - ignore @@ create_new_account_exn attached_mask account ) ; - (* observe Merkle root in mask *) - let merkle_root1 = Mask.Attached.merkle_root attached_mask in - (* adding accounts should change the Merkle root *) - assert (not (Hash.equal merkle_root0 merkle_root1)) ; - (* remove accounts from mask and parent *) - Mask.Attached.remove_accounts_exn attached_mask account_ids ; - (* should see original Merkle root after removing the accounts *) - let merkle_root2 = Mask.Attached.merkle_root attached_mask in - assert (Hash.equal merkle_root2 merkle_root0) ) - let%test_unit "fold of addition over account balances in parent and mask" = Test.with_instances (fun maskable mask -> let attached_mask = Maskable.register_mask maskable mask in @@ -620,31 +544,6 @@ module Make (Test : Test_intf) = struct | `Added, _new_loc -> [%test_eq: Hash.t] start_hash (merkle_root ledger) ) - let%test_unit "reuse of locations for removed accounts" = - Test.with_instances (fun maskable mask -> - let attached_mask = Maskable.register_mask maskable mask in - let num_accounts = 5 in - let account_ids = Account_id.gen_accounts num_accounts in - let balances = - Quickcheck.random_value - (Quickcheck.Generator.list_with_length num_accounts Balance.gen) - in - let accounts = List.map2_exn account_ids balances ~f:Account.create in - assert ( - Option.is_none - (Mask.Attached.For_testing.current_location 
attached_mask) ) ; - (* add accounts to mask *) - List.iter accounts ~f:(fun account -> - ignore @@ create_new_account_exn attached_mask account ) ; - assert ( - Option.is_some - (Mask.Attached.For_testing.current_location attached_mask) ) ; - (* remove accounts *) - Mask.Attached.remove_accounts_exn attached_mask account_ids ; - assert ( - Option.is_none - (Mask.Attached.For_testing.current_location attached_mask) ) ) - let%test_unit "num_accounts for unique keys in mask and parent" = Test.with_instances (fun maskable mask -> let attached_mask = Maskable.register_mask maskable mask in diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index dc21cd839ca1..6f76bb40fe9c 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -767,38 +767,6 @@ module Make (Inputs : Inputs_intf.S) = struct assert (Addr.depth address <= t.depth) ; get_hash t address |> Option.value_exn - let remove_accounts_exn t keys = - assert_is_attached t ; - let rec loop keys parent_keys mask_locations = - match keys with - | [] -> - (parent_keys, mask_locations) - | key :: rest -> ( - match self_find_location t key with - | None -> - loop rest (key :: parent_keys) mask_locations - | Some loc -> - loop rest parent_keys (loc :: mask_locations) ) - in - (* parent_keys not in mask, may be in parent mask_locations definitely in - mask *) - let parent_keys, mask_locations = loop keys [] [] in - (* allow call to parent to raise an exception if raised, the parent - hasn't removed any accounts, and we don't try to remove any accounts - from mask *) - Base.remove_accounts_exn (get_parent t) parent_keys ; - (* removing accounts in parent succeeded, so proceed with removing - accounts from mask we sort mask locations in reverse order, - potentially allowing reuse of locations *) - let rev_sorted_mask_locations = - List.sort mask_locations ~compare:(fun loc1 loc2 -> - let loc1 = Location.to_path_exn loc1 in - 
let loc2 = Location.to_path_exn loc2 in - Location.Addr.compare loc2 loc1 ) - in - List.iter rev_sorted_mask_locations - ~f:(remove_account_and_update_hashes t) - (* Destroy intentionally does not commit before destroying as sometimes this is desired behavior *) let close t = diff --git a/src/lib/mina_base/ledger_intf.ml b/src/lib/mina_base/ledger_intf.ml index 1e86851eda20..8d020da799e6 100644 --- a/src/lib/mina_base/ledger_intf.ml +++ b/src/lib/mina_base/ledger_intf.ml @@ -21,8 +21,6 @@ module type S = sig val create_new_account : t -> Account_id.t -> Account.t -> unit Or_error.t - val remove_accounts_exn : t -> Account_id.t list -> unit - val merkle_root : t -> Ledger_hash.t val with_ledger : depth:int -> f:(t -> 'a) -> 'a diff --git a/src/lib/mina_base/sparse_ledger_base.ml b/src/lib/mina_base/sparse_ledger_base.ml index d8fe3128431b..791bf7185cdc 100644 --- a/src/lib/mina_base/sparse_ledger_base.ml +++ b/src/lib/mina_base/sparse_ledger_base.ml @@ -111,9 +111,6 @@ module L = struct let create_new_account t id to_set = get_or_create_account t id to_set |> Or_error.map ~f:ignore - let remove_accounts_exn : t -> Account_id.t list -> unit = - fun _t _xs -> failwith "remove_accounts_exn: not implemented" - let merkle_root : t -> Ledger_hash.t = fun t -> M.merkle_root !t let with_ledger : depth:int -> f:(t -> 'a) -> 'a = From b90eab438e8e3d59195034ed6a2ac2d93fcd0730 Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 22 Nov 2023 17:41:05 +0100 Subject: [PATCH 083/119] Remove make_space_for --- src/lib/merkle_ledger/any_ledger.ml | 2 -- src/lib/merkle_ledger/database.ml | 2 -- src/lib/merkle_ledger/null_ledger.ml | 2 -- src/lib/merkle_ledger/syncable_intf.ml | 2 -- src/lib/merkle_mask/masking_merkle_tree.ml | 4 ---- src/lib/syncable_ledger/syncable_ledger.ml | 1 - 6 files changed, 13 deletions(-) diff --git a/src/lib/merkle_ledger/any_ledger.ml b/src/lib/merkle_ledger/any_ledger.ml index 2fd41f8573ec..147fa13e03a2 100644 --- a/src/lib/merkle_ledger/any_ledger.ml +++ 
b/src/lib/merkle_ledger/any_ledger.ml @@ -182,8 +182,6 @@ module Make_base (Inputs : Inputs_intf) : let to_list_sequential (T ((module Base), t)) = Base.to_list_sequential t - let make_space_for (T ((module Base), t)) = Base.make_space_for t - let get_all_accounts_rooted_at_exn (T ((module Base), t)) = Base.get_all_accounts_rooted_at_exn t diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 7763f03835c0..4e0ff0385371 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -196,8 +196,6 @@ module Make (Inputs : Inputs_intf) : assert (Addr.depth address <= mdb.depth) ; set_bin mdb (Location.Hash address) Hash.bin_size_t Hash.bin_write_t hash - let make_space_for _t _tot = () - let get_generic mdb location = assert (Location.is_generic location) ; get_raw mdb location diff --git a/src/lib/merkle_ledger/null_ledger.ml b/src/lib/merkle_ledger/null_ledger.ml index 8939ee5262f9..e9e511f15c2f 100644 --- a/src/lib/merkle_ledger/null_ledger.ml +++ b/src/lib/merkle_ledger/null_ledger.ml @@ -147,8 +147,6 @@ end = struct let to_list_sequential _t = [] - let make_space_for _t _tot = () - let get_all_accounts_rooted_at_exn t addr = let first_node, last_node = Addr.Range.subtree_range ~ledger_depth:t.depth addr diff --git a/src/lib/merkle_ledger/syncable_intf.ml b/src/lib/merkle_ledger/syncable_intf.ml index 6d9d74fe5101..7665106793d8 100644 --- a/src/lib/merkle_ledger/syncable_intf.ml +++ b/src/lib/merkle_ledger/syncable_intf.ml @@ -30,6 +30,4 @@ module type S = sig val get_all_accounts_rooted_at_exn : t -> addr -> (addr * account) list val merkle_root : t -> root_hash - - val make_space_for : t -> int -> unit end diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 6f76bb40fe9c..6822fdc0f442 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -758,10 +758,6 @@ module Make (Inputs : Inputs_intf.S) = 
struct set_account_unsafe t location account ) (* not needed for in-memory mask; in the database, it's currently a NOP *) - let make_space_for t = - assert_is_attached t ; - Base.make_space_for (get_parent t) - let get_inner_hash_at_addr_exn t address = assert_is_attached t ; assert (Addr.depth address <= t.depth) ; diff --git a/src/lib/syncable_ledger/syncable_ledger.ml b/src/lib/syncable_ledger/syncable_ledger.ml index ba7654d3f1bf..a5db250cda51 100644 --- a/src/lib/syncable_ledger/syncable_ledger.ml +++ b/src/lib/syncable_ledger/syncable_ledger.ml @@ -525,7 +525,6 @@ end = struct (* FIXME: bug when height=0 https://github.com/o1-labs/nanobit/issues/365 *) let actual = complete_with_empties content_hash height (MT.depth t.tree) in if Hash.equal actual rh then ( - MT.make_space_for t.tree n ; Addr.Table.clear t.waiting_parents ; (* We should use this information to set the empty account slots empty and start syncing at the content root. See #1972. *) From 833003ba0877b071b735ed3b0058964c4250a5aa Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 22 Nov 2023 18:26:28 +0100 Subject: [PATCH 084/119] Simplify self_xxx functions --- src/lib/merkle_mask/masking_merkle_tree.ml | 36 +++++++++------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 6822fdc0f442..8228f6379a46 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -141,9 +141,6 @@ module Make (Inputs : Inputs_intf.S) = struct let depth t = assert_is_attached t ; t.depth - (* don't rely on a particular implementation *) - let self_find_hash t address = Map.find t.maps.hashes address - let self_set_hash t address hash = t.maps.hashes <- Map.set t.maps.hashes ~key:address ~data:hash @@ -152,9 +149,6 @@ module Make (Inputs : Inputs_intf.S) = struct assert (Addr.depth address <= t.depth) ; self_set_hash t address hash - (* don't rely on a particular 
implementation *) - let self_find_location t account_id = Map.find t.maps.locations account_id - let self_set_location t account_id location = t.maps.locations <- Map.set t.maps.locations ~key:account_id ~data:location ; @@ -168,9 +162,6 @@ module Make (Inputs : Inputs_intf.S) = struct if Location.( > ) location loc then t.current_location <- Some location - (* don't rely on a particular implementation *) - let self_find_account t location = Map.find t.maps.accounts location - let self_set_account t location account = t.maps.accounts <- Map.set t.maps.accounts ~key:location ~data:account ; self_set_location t (Account.identifier account) location @@ -179,7 +170,7 @@ module Make (Inputs : Inputs_intf.S) = struct parent *) let get t location = assert_is_attached t ; - match self_find_account t location with + match Map.find t.maps.accounts location with | Some account -> Some account | None -> @@ -218,7 +209,7 @@ module Make (Inputs : Inputs_intf.S) = struct let get_batch t = let self_find id = - let res = self_find_account t id in + let res = Map.find t.maps.accounts id in let res = if Option.is_none res then let is_empty = @@ -417,7 +408,7 @@ module Make (Inputs : Inputs_intf.S) = struct (* use mask Merkle root, if it exists, else get from parent *) let merkle_root t = assert_is_attached t ; - match self_find_hash t (Addr.root ()) with + match Map.find t.maps.hashes (Addr.root ()) with | Some hash -> hash | None -> @@ -426,7 +417,7 @@ module Make (Inputs : Inputs_intf.S) = struct let remove_account_and_update_hashes t location = assert_is_attached t ; (* remove account and key from tables *) - let account = Option.value_exn (self_find_account t location) in + let account = Option.value_exn (Map.find t.maps.accounts location) in t.maps.accounts <- Map.remove t.maps.accounts location ; (* Update token info. 
*) let account_id = Account.identifier account in @@ -485,11 +476,11 @@ module Make (Inputs : Inputs_intf.S) = struct if the account in the parent is the same in the mask *) let parent_set_notify t account = assert_is_attached t ; - match self_find_location t (Account.identifier account) with + match Map.find t.maps.locations (Account.identifier account) with | None -> () | Some location -> ( - match self_find_account t location with + match Map.find t.maps.accounts location with | Some existing_account -> if Account.equal account existing_account then remove_account_and_update_hashes t location @@ -500,7 +491,7 @@ module Make (Inputs : Inputs_intf.S) = struct parent *) let get_hash t addr = assert_is_attached t ; - match self_find_hash t addr with + match Map.find t.maps.hashes addr with | Some hash -> Some hash | None -> ( @@ -520,7 +511,7 @@ module Make (Inputs : Inputs_intf.S) = struct assert_is_attached t ; let self_hashes_rev = List.rev_map locations ~f:(fun location -> - (location, self_find_hash t (Location.to_path_exn location)) ) + (location, Map.find t.maps.hashes (Location.to_path_exn location)) ) in let parent_locations_rev = List.filter_map self_hashes_rev ~f:(fun (location, hash) -> @@ -696,7 +687,7 @@ module Make (Inputs : Inputs_intf.S) = struct let location_of_account t account_id = assert_is_attached t ; - let mask_result = self_find_location t account_id in + let mask_result = Map.find t.maps.locations account_id in match mask_result with | Some _ -> mask_result @@ -705,7 +696,8 @@ module Make (Inputs : Inputs_intf.S) = struct let location_of_account_batch t = self_find_or_batch_lookup - (fun id -> (id, Option.map ~f:Option.some @@ self_find_location t id)) + (fun id -> + (id, Option.map ~f:Option.some @@ Map.find t.maps.locations id) ) Base.location_of_account_batch t (* Adds specified accounts to the mask by laoding them from parent ledger. 
@@ -858,11 +850,11 @@ module Make (Inputs : Inputs_intf.S) = struct module For_testing = struct let location_in_mask t location = assert_is_attached t ; - Option.is_some (self_find_account t location) + Option.is_some (Map.find t.maps.accounts location) let address_in_mask t addr = assert_is_attached t ; - Option.is_some (self_find_hash t addr) + Option.is_some (Map.find t.maps.hashes addr) let current_location t = t.current_location end @@ -881,7 +873,7 @@ module Make (Inputs : Inputs_intf.S) = struct (* NB: updates the mutable current_location field in t *) let get_or_create_account t account_id account = assert_is_attached t ; - match self_find_location t account_id with + match Map.find t.maps.locations account_id with | None -> ( (* not in mask, maybe in parent *) match Base.location_of_account (get_parent t) account_id with From aed3521c49338b17211ad01d4e9bf2035b247135 Mon Sep 17 00:00:00 2001 From: georgeee Date: Wed, 22 Nov 2023 20:50:43 +0100 Subject: [PATCH 085/119] Prototype of optimizing lookups over masks Problem: 290 layers of masking trees makes processing of transactions to be O(290*logn) where n is the size of a mask. This 290x factor is a significant slowdown Solution: have a mechanism to replace O(290*logn) to something around O(30*logn). 
This PR is prototype, without logic integrated into frontier handling --- src/lib/merkle_mask/maskable_merkle_tree.ml | 4 +- .../merkle_mask/maskable_merkle_tree_intf.ml | 5 +- src/lib/merkle_mask/masking_merkle_tree.ml | 165 +++++++++++------- .../merkle_mask/masking_merkle_tree_intf.ml | 7 +- src/lib/mina_ledger/ledger.ml | 6 +- src/lib/mina_ledger/ledger.mli | 5 +- 6 files changed, 126 insertions(+), 66 deletions(-) diff --git a/src/lib/merkle_mask/maskable_merkle_tree.ml b/src/lib/merkle_mask/maskable_merkle_tree.ml index 45235a1970af..b4115b2ff2c1 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree.ml +++ b/src/lib/merkle_mask/maskable_merkle_tree.ml @@ -144,8 +144,8 @@ module Make (Inputs : Inputs_intf) = struct let unsafe_preload_accounts_from_parent = Mask.Attached.unsafe_preload_accounts_from_parent - let register_mask t mask = - let attached_mask = Mask.set_parent mask t in + let register_mask ?accumulated t mask = + let attached_mask = Mask.set_parent ?accumulated mask t in List.iter (Uuid.Table.data registered_masks) ~f:(fun ms -> List.iter ms ~f:(fun m -> [%test_result: bool] diff --git a/src/lib/merkle_mask/maskable_merkle_tree_intf.ml b/src/lib/merkle_mask/maskable_merkle_tree_intf.ml index aa447ab9d1cd..d452fe01a195 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree_intf.ml +++ b/src/lib/merkle_mask/maskable_merkle_tree_intf.ml @@ -8,12 +8,15 @@ module type S = sig type attached_mask + type accumulated_t + (* registering a mask makes it an active child of the parent Merkle tree - reads to the mask that fail are delegated to the parent - writes to the parent notify the child mask *) - val register_mask : t -> unattached_mask -> attached_mask + val register_mask : + ?accumulated:accumulated_t -> t -> unattached_mask -> attached_mask val unsafe_preload_accounts_from_parent : attached_mask -> account_id list -> unit diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 8228f6379a46..3d6f3db129f4 
100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -49,6 +49,8 @@ module Make (Inputs : Inputs_intf.S) = struct let maps_copy { accounts; token_owners; hashes; locations } = { accounts; token_owners; hashes; locations } + type accumulated_t = { accumulated_maps : maps_t; ancestor : Base.t } + type t = { uuid : Uuid.Stable.V1.t ; mutable parent : Parent.t @@ -56,6 +58,10 @@ module Make (Inputs : Inputs_intf.S) = struct ; mutable current_location : Location.t option ; depth : int ; maps : maps_t + (* If present, contains maps containing changes both for this mask + and for a few ancestors. + This is used as a lookup cache. *) + ; mutable accumulated : (accumulated_t[@sexp.opaque]) option } [@@deriving sexp] @@ -67,6 +73,7 @@ module Make (Inputs : Inputs_intf.S) = struct ; detached_parent_signal = Async.Ivar.create () ; current_location = None ; depth + ; accumulated = None ; maps = { accounts = Location_binable.Map.empty ; token_owners = Token_id.Map.empty @@ -104,6 +111,15 @@ module Make (Inputs : Inputs_intf.S) = struct Dangling_parent_reference of Uuid.t * (* Location where null was set*) string + let to_accumulated t = + match (t.accumulated, t.parent) with + | Some { accumulated_maps; ancestor }, _ -> + { ancestor; accumulated_maps = maps_copy accumulated_maps } + | None, Ok ancestor -> + { ancestor; accumulated_maps = maps_copy t.maps } + | None, Error loc -> + raise (Dangling_parent_reference (t.uuid, loc)) + let create () = failwith "Mask.Attached.create: cannot create an attached mask; use Mask.create \ @@ -117,6 +133,7 @@ module Make (Inputs : Inputs_intf.S) = struct let unset_parent ?(trigger_signal = true) ~loc t = assert (Result.is_ok t.parent) ; t.parent <- Error loc ; + t.accumulated <- None ; if trigger_signal then Async.Ivar.fill_if_empty t.detached_parent_signal () ; t @@ -133,6 +150,13 @@ module Make (Inputs : Inputs_intf.S) = struct let get_parent ({ parent = opt; _ } as t) = 
assert_is_attached t ; Result.ok_or_failwith opt + let maps_and_ancestor t = + match t.accumulated with + | Some { accumulated_maps; ancestor } -> + (accumulated_maps, ancestor) + | None -> + (t.maps, get_parent t) + let get_uuid t = assert_is_attached t ; t.uuid let get_directory t = @@ -141,8 +165,13 @@ module Make (Inputs : Inputs_intf.S) = struct let depth t = assert_is_attached t ; t.depth + let update_maps ~f t = + f t.maps ; + Option.iter t.accumulated ~f:(fun { accumulated_maps = ms; _ } -> f ms) + let self_set_hash t address hash = - t.maps.hashes <- Map.set t.maps.hashes ~key:address ~data:hash + update_maps t ~f:(fun maps -> + maps.hashes <- Map.set maps.hashes ~key:address ~data:hash ) let set_inner_hash_at_addr_exn t address hash = assert_is_attached t ; @@ -150,8 +179,9 @@ module Make (Inputs : Inputs_intf.S) = struct self_set_hash t address hash let self_set_location t account_id location = - t.maps.locations <- - Map.set t.maps.locations ~key:account_id ~data:location ; + update_maps t ~f:(fun maps -> + maps.locations <- + Map.set maps.locations ~key:account_id ~data:location ) ; (* if account is at a hitherto-unused location, that becomes the current location *) @@ -163,14 +193,21 @@ module Make (Inputs : Inputs_intf.S) = struct t.current_location <- Some location let self_set_account t location account = - t.maps.accounts <- Map.set t.maps.accounts ~key:location ~data:account ; + update_maps t ~f:(fun maps -> + maps.accounts <- Map.set maps.accounts ~key:location ~data:account ) ; self_set_location t (Account.identifier account) location + let self_set_token_owner t token_id account_id = + update_maps t ~f:(fun maps -> + maps.token_owners <- + Map.set maps.token_owners ~key:token_id ~data:account_id ) + (* a read does a lookup in the account_tbl; if that fails, delegate to parent *) let get t location = assert_is_attached t ; - match Map.find t.maps.accounts location with + let maps, ancestor = maps_and_ancestor t in + match Map.find maps.accounts 
location with | Some account -> Some account | None -> @@ -183,11 +220,12 @@ module Make (Inputs : Inputs_intf.S) = struct let current_address = Location.to_path_exn current_location in Addr.is_further_right ~than:current_address address in - if is_empty then None else Base.get (get_parent t) location + if is_empty then None else Base.get ancestor location let self_find_or_batch_lookup self_find lookup_parent t ids = assert_is_attached t ; - let self_found_or_none = List.map ids ~f:self_find in + let maps, ancestor = maps_and_ancestor t in + let self_found_or_none = List.map ids ~f:(self_find ~maps) in let not_found = List.filter_map self_found_or_none ~f:(function | id, None -> @@ -195,7 +233,7 @@ module Make (Inputs : Inputs_intf.S) = struct | _ -> None ) in - let from_parent = lookup_parent (get_parent t) not_found in + let from_parent = lookup_parent ancestor not_found in List.fold_map self_found_or_none ~init:from_parent ~f:(fun from_parent (id, self_found) -> match (self_found, from_parent) with @@ -208,8 +246,8 @@ module Make (Inputs : Inputs_intf.S) = struct |> snd let get_batch t = - let self_find id = - let res = Map.find t.maps.accounts id in + let self_find ~maps id = + let res = Map.find maps.accounts id in let res = if Option.is_none res then let is_empty = @@ -324,17 +362,18 @@ module Make (Inputs : Inputs_intf.S) = struct let merkle_path_at_addr_exn t address = assert_is_attached t ; + let maps, ancestor = maps_and_ancestor t in match - self_merkle_path ~depth:t.depth ~hashes:t.maps.hashes + self_merkle_path ~depth:t.depth ~hashes:maps.hashes ~current_location:t.current_location address with | Some path -> path | None -> let parent_merkle_path = - Base.merkle_path_at_addr_exn (get_parent t) address + Base.merkle_path_at_addr_exn ancestor address in - fixup_merkle_path ~hashes:t.maps.hashes parent_merkle_path ~address + fixup_merkle_path ~hashes:maps.hashes parent_merkle_path ~address let merkle_path_at_index_exn t index = merkle_path_at_addr_exn t 
(Addr.of_int_exn ~ledger_depth:t.depth index) @@ -344,12 +383,12 @@ module Make (Inputs : Inputs_intf.S) = struct let path_batch_impl ~fixup_path ~self_lookup ~base_lookup t locations = assert_is_attached t ; - let parent = get_parent t in + let maps, ancestor = maps_and_ancestor t in let self_paths = List.map locations ~f:(fun location -> let address = Location.to_path_exn location in - self_lookup ~hashes:t.maps.hashes - ~current_location:t.current_location ~depth:t.depth address + self_lookup ~hashes:maps.hashes ~current_location:t.current_location + ~depth:t.depth address |> Option.value_map ~default:(Either.Second (location, address)) ~f:Either.first ) @@ -362,15 +401,14 @@ module Make (Inputs : Inputs_intf.S) = struct | Either.Second (location, _) -> Some location ) in - if List.is_empty locs then [] else base_lookup parent locs + if List.is_empty locs then [] else base_lookup ancestor locs in let f parent_paths = function | Either.First path -> (parent_paths, path) | Either.Second (_, address) -> let path = - fixup_path ~hashes:t.maps.hashes ~address - (List.hd_exn parent_paths) + fixup_path ~hashes:maps.hashes ~address (List.hd_exn parent_paths) in (List.tl_exn parent_paths, path) in @@ -408,14 +446,15 @@ module Make (Inputs : Inputs_intf.S) = struct (* use mask Merkle root, if it exists, else get from parent *) let merkle_root t = assert_is_attached t ; - match Map.find t.maps.hashes (Addr.root ()) with + let maps, ancestor = maps_and_ancestor t in + match Map.find maps.hashes (Addr.root ()) with | Some hash -> hash | None -> - Base.merkle_root (get_parent t) + Base.merkle_root ancestor let remove_account_and_update_hashes t location = - assert_is_attached t ; + t.accumulated <- None ; (* remove account and key from tables *) let account = Option.value_exn (Map.find t.maps.accounts location) in t.maps.accounts <- Map.remove t.maps.accounts location ; @@ -451,10 +490,9 @@ module Make (Inputs : Inputs_intf.S) = struct self_set_account t location account ; (* 
Update token info. *) let account_id = Account.identifier account in - t.maps.token_owners <- - Map.set t.maps.token_owners - ~key:(Account_id.derive_token_id ~owner:account_id) - ~data:account_id + self_set_token_owner t + (Account_id.derive_token_id ~owner:account_id) + account_id (* a write writes only to the mask, parent is not involved need to update both account and hash pieces of the mask *) @@ -491,12 +529,13 @@ module Make (Inputs : Inputs_intf.S) = struct parent *) let get_hash t addr = assert_is_attached t ; - match Map.find t.maps.hashes addr with + let maps, ancestor = maps_and_ancestor t in + match Map.find maps.hashes addr with | Some hash -> Some hash | None -> ( try - let hash = Base.get_inner_hash_at_addr_exn (get_parent t) addr in + let hash = Base.get_inner_hash_at_addr_exn ancestor addr in Some hash with _ -> None ) @@ -509,9 +548,10 @@ module Make (Inputs : Inputs_intf.S) = struct let get_hash_batch_exn t locations = assert_is_attached t ; + let maps, ancestor = maps_and_ancestor t in let self_hashes_rev = List.rev_map locations ~f:(fun location -> - (location, Map.find t.maps.hashes (Location.to_path_exn location)) ) + (location, Map.find maps.hashes (Location.to_path_exn location)) ) in let parent_locations_rev = List.filter_map self_hashes_rev ~f:(fun (location, hash) -> @@ -519,7 +559,7 @@ module Make (Inputs : Inputs_intf.S) = struct in let parent_hashes_rev = if List.is_empty parent_locations_rev then [] - else Base.get_hash_batch_exn (get_parent t) parent_locations_rev + else Base.get_hash_batch_exn ancestor parent_locations_rev in let rec recombine self_hashes_rev parent_hashes_rev acc = match (self_hashes_rev, parent_hashes_rev) with @@ -536,22 +576,22 @@ module Make (Inputs : Inputs_intf.S) = struct (* transfer state from mask to parent; flush local state *) let commit t = assert_is_attached t ; + let parent = get_parent t in let old_root_hash = merkle_root t in let account_data = Map.to_alist t.maps.accounts in - Base.set_batch 
(get_parent t) account_data ; + Base.set_batch parent account_data ; t.maps.accounts <- Location_binable.Map.empty ; t.maps.hashes <- Addr.Map.empty ; + (* TODO why only 2/4 maps are updated ? *) Debug_assert.debug_assert (fun () -> [%test_result: Hash.t] ~message: "Parent merkle root after committing should be the same as the \ old one in the mask" - ~expect:old_root_hash - (Base.merkle_root (get_parent t)) ; + ~expect:old_root_hash (Base.merkle_root parent) ; [%test_result: Hash.t] ~message:"Merkle root of the mask should delegate to the parent now" - ~expect:(merkle_root t) - (Base.merkle_root (get_parent t)) ) + ~expect:(merkle_root t) (Base.merkle_root parent) ) (* copy tables in t; use same parent *) let copy t = @@ -561,6 +601,9 @@ module Make (Inputs : Inputs_intf.S) = struct ; current_location = t.current_location ; depth = t.depth ; maps = maps_copy t.maps + ; accumulated = + Option.map t.accumulated ~f:(fun acc -> + { acc with accumulated_maps = maps_copy acc.accumulated_maps } ) } let last_filled t = @@ -621,17 +664,15 @@ module Make (Inputs : Inputs_intf.S) = struct let set_location_batch ~last_location t account_to_location_list = t.current_location <- Some last_location ; Mina_stdlib.Nonempty_list.iter account_to_location_list - ~f:(fun (key, data) -> - t.maps.locations <- Map.set t.maps.locations ~key ~data ) + ~f:(fun (key, data) -> self_set_location t key data) let set_raw_account_batch t locations_and_accounts = assert_is_attached t ; List.iter locations_and_accounts ~f:(fun (location, account) -> let account_id = Account.identifier account in - t.maps.token_owners <- - Map.set t.maps.token_owners - ~key:(Account_id.derive_token_id ~owner:account_id) - ~data:account_id ; + self_set_token_owner t + (Account_id.derive_token_id ~owner:account_id) + account_id ; self_set_account t location account ) end) @@ -646,31 +687,34 @@ module Make (Inputs : Inputs_intf.S) = struct let token_owner t tid = assert_is_attached t ; - match Map.find 
t.maps.token_owners tid with + let maps, ancestor = maps_and_ancestor t in + match Map.find maps.token_owners tid with | Some id -> Some id | None -> - Base.token_owner (get_parent t) tid + Base.token_owner ancestor tid let token_owners (t : t) : Account_id.Set.t = assert_is_attached t ; + let maps, ancestor = maps_and_ancestor t in let mask_owners = - Map.fold t.maps.token_owners ~init:Account_id.Set.empty + Map.fold maps.token_owners ~init:Account_id.Set.empty ~f:(fun ~key:_tid ~data:owner acc -> Set.add acc owner) in - Set.union mask_owners (Base.token_owners (get_parent t)) + Set.union mask_owners (Base.token_owners ancestor) let tokens t pk = assert_is_attached t ; + let maps, ancestor = maps_and_ancestor t in let mask_tokens = - Map.keys t.maps.locations + Map.keys maps.locations |> List.filter_map ~f:(fun aid -> if Key.equal pk (Account_id.public_key aid) then Some (Account_id.token_id aid) else None ) |> Token_id.Set.of_list in - Set.union mask_tokens (Base.tokens (get_parent t) pk) + Set.union mask_tokens (Base.tokens ancestor pk) let num_accounts t = assert_is_attached t ; @@ -687,17 +731,18 @@ module Make (Inputs : Inputs_intf.S) = struct let location_of_account t account_id = assert_is_attached t ; - let mask_result = Map.find t.maps.locations account_id in + let maps, ancestor = maps_and_ancestor t in + let mask_result = Map.find maps.locations account_id in match mask_result with | Some _ -> mask_result | None -> - Base.location_of_account (get_parent t) account_id + Base.location_of_account ancestor account_id let location_of_account_batch t = self_find_or_batch_lookup - (fun id -> - (id, Option.map ~f:Option.some @@ Map.find t.maps.locations id) ) + (fun ~maps id -> + (id, Option.map ~f:Option.some @@ Map.find maps.locations id) ) Base.location_of_account_batch t (* Adds specified accounts to the mask by laoding them from parent ledger. 
@@ -734,9 +779,7 @@ module Make (Inputs : Inputs_intf.S) = struct in generate_locations non_empty_locations [] in - let all_hashes = - Base.get_hash_batch_exn (get_parent t) all_hash_locations - in + let all_hashes = get_hash_batch_exn t all_hash_locations in (* Batch import merkle paths and self hashes. *) List.iter2_exn all_hash_locations all_hashes ~f:(fun location hash -> let address = Location.to_path_exn location in @@ -805,7 +848,8 @@ module Make (Inputs : Inputs_intf.S) = struct let foldi_with_ignored_accounts t ignored_accounts ~init ~f = assert_is_attached t ; - let locations_and_accounts = Map.to_alist t.maps.accounts in + let maps, ancestor = maps_and_ancestor t in + let locations_and_accounts = Map.to_alist maps.accounts in (* parent should ignore accounts in this mask *) let mask_accounts = List.map locations_and_accounts ~f:(fun (_loc, acct) -> @@ -817,8 +861,7 @@ module Make (Inputs : Inputs_intf.S) = struct in (* in parent, ignore any passed-in ignored accounts and accounts in mask *) let parent_result = - Base.foldi_with_ignored_accounts (get_parent t) all_ignored_accounts - ~init ~f + Base.foldi_with_ignored_accounts ancestor all_ignored_accounts ~init ~f in let f' accum (location, account) = (* for mask, ignore just passed-in ignored accounts *) @@ -873,10 +916,11 @@ module Make (Inputs : Inputs_intf.S) = struct (* NB: updates the mutable current_location field in t *) let get_or_create_account t account_id account = assert_is_attached t ; - match Map.find t.maps.locations account_id with + let maps, ancestor = maps_and_ancestor t in + match Map.find maps.locations account_id with | None -> ( (* not in mask, maybe in parent *) - match Base.location_of_account (get_parent t) account_id with + match Base.location_of_account ancestor account_id with | Some location -> Ok (`Existed, location) | None -> ( @@ -905,12 +949,13 @@ module Make (Inputs : Inputs_intf.S) = struct let location_of_sexp = Location.t_of_sexp end - let set_parent t parent = + let 
set_parent ?accumulated t parent = assert (Result.is_error t.parent) ; assert (Option.is_none (Async.Ivar.peek t.detached_parent_signal)) ; assert (Int.equal t.depth (Base.depth parent)) ; t.parent <- Ok parent ; t.current_location <- Attached.last_filled t ; + t.accumulated <- accumulated ; t let addr_to_location addr = Location.Account addr diff --git a/src/lib/merkle_mask/masking_merkle_tree_intf.ml b/src/lib/merkle_mask/masking_merkle_tree_intf.ml index 6b366cb9d571..a88ace610186 100644 --- a/src/lib/merkle_mask/masking_merkle_tree_intf.ml +++ b/src/lib/merkle_mask/masking_merkle_tree_intf.ml @@ -32,6 +32,8 @@ module type S = sig val get_uuid : t -> Uuid.t + type accumulated_t + module Attached : sig include Base_merkle_tree_intf.S @@ -85,6 +87,8 @@ module type S = sig *) val unsafe_preload_accounts_from_parent : t -> account_id list -> unit + val to_accumulated : t -> accumulated_t + (** already have module For_testing from include above *) module For_testing : sig val location_in_mask : t -> location -> bool @@ -96,5 +100,6 @@ module type S = sig end (** tell mask about parent *) - val set_parent : unattached -> parent -> Attached.t + val set_parent : + ?accumulated:accumulated_t -> unattached -> parent -> Attached.t end diff --git a/src/lib/mina_ledger/ledger.ml b/src/lib/mina_ledger/ledger.ml index 0272b13c8ca5..2d4fa8f4ae24 100644 --- a/src/lib/mina_ledger/ledger.ml +++ b/src/lib/mina_ledger/ledger.ml @@ -164,6 +164,7 @@ module Ledger_inner = struct and type root_hash := Hash.t and type unattached_mask := Mask.t and type attached_mask := Mask.Attached.t + and type accumulated_t := Mask.accumulated_t and type t := Any_ledger.M.t = Merkle_mask.Maskable_merkle_tree.Make (struct include Inputs @@ -270,7 +271,8 @@ module Ledger_inner = struct let packed t = Any_ledger.cast (module Mask.Attached) t - let register_mask t mask = Maskable.register_mask (packed t) mask + let register_mask ?accumulated t mask = + Maskable.register_mask ?accumulated (packed t) mask 
let unsafe_preload_accounts_from_parent = Maskable.unsafe_preload_accounts_from_parent @@ -284,6 +286,8 @@ module Ledger_inner = struct type attached_mask = Mask.Attached.t + type accumulated_t = Mask.accumulated_t + (* inside MaskedLedger, the functor argument has assigned to location, account, and path but the module signature for the functor result wants them, so we declare them here *) type location = Location.t diff --git a/src/lib/mina_ledger/ledger.mli b/src/lib/mina_ledger/ledger.mli index e38703f548be..6742e24dcf4b 100644 --- a/src/lib/mina_ledger/ledger.mli +++ b/src/lib/mina_ledger/ledger.mli @@ -56,6 +56,7 @@ module Maskable : and type root_hash := Ledger_hash.t and type unattached_mask := Mask.t and type attached_mask := Mask.Attached.t + and type accumulated_t := Mask.accumulated_t and type t := Any_ledger.M.t include @@ -73,6 +74,7 @@ include and type t = Mask.Attached.t and type attached_mask = Mask.Attached.t and type unattached_mask = Mask.t + and type accumulated_t = Mask.accumulated_t (* We override the type of unregister_mask_exn that comes from Merkle_mask.Maskable_merkle_tree_intf.S because at this level callers aren't @@ -100,7 +102,8 @@ val of_database : Db.t -> t (** This is not _really_ copy, merely a stop-gap until we remove usages of copy in our codebase. 
What this actually does is creates a new empty mask on top of the current ledger *) val copy : t -> t -val register_mask : t -> Mask.t -> Mask.Attached.t +val register_mask : + ?accumulated:Mask.accumulated_t -> t -> Mask.t -> Mask.Attached.t val commit : Mask.Attached.t -> unit From 7fbea17444e86c8e975ee7623b0c0130627d299b Mon Sep 17 00:00:00 2001 From: georgeee Date: Fri, 24 Nov 2023 23:26:43 +0100 Subject: [PATCH 086/119] Integrate mask accumulation into ledger lifecycle --- src/lib/merkle_mask/maskable_merkle_tree.ml | 113 ++++++++++---------- src/lib/merkle_mask/masking_merkle_tree.ml | 68 ++++++++---- src/lib/mina_ledger/ledger.ml | 5 +- src/lib/mina_ledger/ledger.mli | 3 +- 4 files changed, 111 insertions(+), 78 deletions(-) diff --git a/src/lib/merkle_mask/maskable_merkle_tree.ml b/src/lib/merkle_mask/maskable_merkle_tree.ml index b4115b2ff2c1..ea0e741d41b7 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree.ml +++ b/src/lib/merkle_mask/maskable_merkle_tree.ml @@ -157,67 +157,68 @@ module Make (Inputs : Inputs_intf) = struct Uuid.Table.add_multi registered_masks ~key:(get_uuid t) ~data:attached_mask ; attached_mask - let rec unregister_mask_exn ?(grandchildren = `Check) ~loc - (mask : Mask.Attached.t) : Mask.unattached = + let rec iter_descendants ~f uuid = + List.iter + (Hashtbl.find registered_masks uuid |> Option.value ~default:[]) + ~f:(fun child_mask -> + if f child_mask then + iter_descendants ~f (Mask.Attached.get_uuid child_mask) ) + + let unregister_mask_error_msg ~uuid ~parent_uuid suffix = + sprintf "Couldn't unregister mask with UUID %s from parent %s, %s" + (Uuid.to_string_hum uuid) + (Uuid.to_string_hum parent_uuid) + suffix + + let unregister_mask_exn_do ?trigger_signal mask = + let uuid = Mask.Attached.get_uuid mask in let parent_uuid = Mask.Attached.get_parent mask |> get_uuid in - let error_msg suffix = - sprintf "Couldn't unregister mask with UUID %s from parent %s, %s" - (Mask.Attached.get_uuid mask |> Uuid.to_string_hum) - 
(Uuid.to_string_hum parent_uuid) - suffix - in - let trigger_detach_signal = - match grandchildren with - | `Check | `Recursive -> - true - | `I_promise_I_am_reparenting_this_mask -> - false - in - ( match grandchildren with - | `Check -> ( - match Hashtbl.find registered_masks (Mask.Attached.get_uuid mask) with - | Some children -> - failwith @@ error_msg - @@ sprintf - !"mask has children that must be unregistered first: %{sexp: \ - Uuid.t list}" - (List.map ~f:Mask.Attached.get_uuid children) - | None -> - () ) - | `I_promise_I_am_reparenting_this_mask -> - () - | `Recursive -> - (* You must not retain any references to children of the mask we're - unregistering if you pass `Recursive, so this is only used in - with_ephemeral_ledger. *) - List.iter - ( Hashtbl.find registered_masks (Mask.Attached.get_uuid mask) - |> Option.value ~default:[] ) - ~f:(fun child_mask -> - ignore - @@ unregister_mask_exn ~loc ~grandchildren:`Recursive child_mask ) - ) ; + let error_msg = unregister_mask_error_msg ~uuid ~parent_uuid in match Uuid.Table.find registered_masks parent_uuid with | None -> failwith @@ error_msg "parent not in registered_masks" | Some masks -> - ( match List.find masks ~f:(fun m -> phys_equal m mask) with - | None -> - failwith @@ error_msg "mask not registered with that parent" - | Some _ -> ( - let bad, good = - List.partition_tf masks ~f:(fun m -> phys_equal m mask) - in - assert (List.length bad = 1) ; - match good with - | [] -> - (* no other masks for this maskable *) - Uuid.Table.remove registered_masks parent_uuid - | other_masks -> - Uuid.Table.set registered_masks ~key:parent_uuid - ~data:other_masks ) ) ; - Mask.Attached.unset_parent ~trigger_signal:trigger_detach_signal ~loc - mask + let bad, good = + List.partition_tf masks ~f:(fun m -> phys_equal m mask) + in + if List.length bad <> 1 then + failwith @@ error_msg "mask not registered with that parent" ; + if List.is_empty good then + (* no other masks for this maskable *) + Uuid.Table.remove 
registered_masks parent_uuid + else Uuid.Table.set registered_masks ~key:parent_uuid ~data:good ; + Mask.Attached.unset_parent ?trigger_signal mask + + let unregister_mask_exn ?(grandchildren = `Check) ~loc (mask : Mask.Attached.t) + : Mask.unattached = + let uuid = Mask.Attached.get_uuid mask in + let parent_uuid = Mask.Attached.get_parent mask |> get_uuid in + let error_msg = unregister_mask_error_msg ~uuid ~parent_uuid in + let trigger_signal = + match grandchildren with + | `Check -> ( + match Hashtbl.find registered_masks (Mask.Attached.get_uuid mask) with + | Some children -> + failwith @@ error_msg + @@ sprintf + !"mask has children that must be unregistered first: \ + %{sexp: Uuid.t list}" + (List.map ~f:Mask.Attached.get_uuid children) + | None -> + true ) + | `I_promise_I_am_reparenting_this_mask -> + false + | `Recursive -> + (* You must not retain any references to children of the mask we're + unregistering if you pass `Recursive, so this is only used in + with_ephemeral_ledger. 
*) + iter_descendants uuid + ~f: + ( Fn.compose (Fn.const true) + @@ unregister_mask_exn_do ~trigger_signal:true ~loc ) ; + true + in + unregister_mask_exn_do ~trigger_signal ~loc mask (** a set calls the Base implementation set, notifies registered mask childen *) let set t location account = diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 3d6f3db129f4..1049357c5337 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -49,7 +49,15 @@ module Make (Inputs : Inputs_intf.S) = struct let maps_copy { accounts; token_owners; hashes; locations } = { accounts; token_owners; hashes; locations } - type accumulated_t = { accumulated_maps : maps_t; ancestor : Base.t } + type accumulated_t = + { current : maps_t + ; next : maps_t + ; base : Base.t + ; detached_next_signal : Detached_parent_signal.t + (* Ivar for mask from which next was started being built + When it's fulfilled, "next" becomes "current". 
+ *) + } type t = { uuid : Uuid.Stable.V1.t @@ -67,6 +75,13 @@ module Make (Inputs : Inputs_intf.S) = struct type unattached = t [@@deriving sexp] + let empty_maps () = + { accounts = Location_binable.Map.empty + ; token_owners = Token_id.Map.empty + ; hashes = Addr.Map.empty + ; locations = Account_id.Map.empty + } + let create ~depth () = { uuid = Uuid_unix.create () ; parent = Error __LOC__ @@ -74,12 +89,7 @@ module Make (Inputs : Inputs_intf.S) = struct ; current_location = None ; depth ; accumulated = None - ; maps = - { accounts = Location_binable.Map.empty - ; token_owners = Token_id.Map.empty - ; hashes = Addr.Map.empty - ; locations = Account_id.Map.empty - } + ; maps = empty_maps () } let get_uuid { uuid; _ } = uuid @@ -113,10 +123,18 @@ module Make (Inputs : Inputs_intf.S) = struct let to_accumulated t = match (t.accumulated, t.parent) with - | Some { accumulated_maps; ancestor }, _ -> - { ancestor; accumulated_maps = maps_copy accumulated_maps } - | None, Ok ancestor -> - { ancestor; accumulated_maps = maps_copy t.maps } + | Some { base; detached_next_signal; next; current }, _ -> + { base + ; detached_next_signal + ; next = maps_copy next + ; current = maps_copy current + } + | None, Ok base -> + { base + ; next = maps_copy t.maps + ; current = maps_copy t.maps + ; detached_next_signal = t.detached_parent_signal + } | None, Error loc -> raise (Dangling_parent_reference (t.uuid, loc)) @@ -133,9 +151,9 @@ module Make (Inputs : Inputs_intf.S) = struct let unset_parent ?(trigger_signal = true) ~loc t = assert (Result.is_ok t.parent) ; t.parent <- Error loc ; - t.accumulated <- None ; - if trigger_signal then - Async.Ivar.fill_if_empty t.detached_parent_signal () ; + if trigger_signal then ( + t.accumulated <- None ; + Async.Ivar.fill_if_empty t.detached_parent_signal () ) ; t let assert_is_attached t = @@ -151,9 +169,19 @@ module Make (Inputs : Inputs_intf.S) = struct assert_is_attached t ; Result.ok_or_failwith opt let maps_and_ancestor t = + Option.iter 
t.accumulated + ~f:(fun { detached_next_signal; next; base; current = _ } -> + if Async.Ivar.is_full detached_next_signal then + t.accumulated <- + Some + { next = empty_maps () + ; current = next + ; detached_next_signal = t.detached_parent_signal + ; base + } ) ; match t.accumulated with - | Some { accumulated_maps; ancestor } -> - (accumulated_maps, ancestor) + | Some { current; base; _ } -> + (current, base) | None -> (t.maps, get_parent t) @@ -167,7 +195,8 @@ module Make (Inputs : Inputs_intf.S) = struct let update_maps ~f t = f t.maps ; - Option.iter t.accumulated ~f:(fun { accumulated_maps = ms; _ } -> f ms) + Option.iter t.accumulated ~f:(fun { current; next; _ } -> + f current ; f next ) let self_set_hash t address hash = update_maps t ~f:(fun maps -> @@ -603,7 +632,10 @@ module Make (Inputs : Inputs_intf.S) = struct ; maps = maps_copy t.maps ; accumulated = Option.map t.accumulated ~f:(fun acc -> - { acc with accumulated_maps = maps_copy acc.accumulated_maps } ) + { acc with + next = maps_copy acc.next + ; current = maps_copy acc.current + } ) } let last_filled t = diff --git a/src/lib/mina_ledger/ledger.ml b/src/lib/mina_ledger/ledger.ml index 2d4fa8f4ae24..ff5a12bdbe65 100644 --- a/src/lib/mina_ledger/ledger.ml +++ b/src/lib/mina_ledger/ledger.ml @@ -271,8 +271,9 @@ module Ledger_inner = struct let packed t = Any_ledger.cast (module Mask.Attached) t - let register_mask ?accumulated t mask = - Maskable.register_mask ?accumulated (packed t) mask + let register_mask t = + let accumulated = Mask.Attached.to_accumulated t in + Maskable.register_mask ~accumulated (packed t) let unsafe_preload_accounts_from_parent = Maskable.unsafe_preload_accounts_from_parent diff --git a/src/lib/mina_ledger/ledger.mli b/src/lib/mina_ledger/ledger.mli index 6742e24dcf4b..adc8643d2cc6 100644 --- a/src/lib/mina_ledger/ledger.mli +++ b/src/lib/mina_ledger/ledger.mli @@ -102,8 +102,7 @@ val of_database : Db.t -> t (** This is not _really_ copy, merely a stop-gap until we remove 
usages of copy in our codebase. What this actually does is creates a new empty mask on top of the current ledger *) val copy : t -> t -val register_mask : - ?accumulated:Mask.accumulated_t -> t -> Mask.t -> Mask.Attached.t +val register_mask : t -> Mask.t -> Mask.Attached.t val commit : Mask.Attached.t -> unit From 1b5779ebeba576acc014cd465f6190bea822aaab Mon Sep 17 00:00:00 2001 From: georgeee Date: Sat, 25 Nov 2023 09:23:03 +0100 Subject: [PATCH 087/119] Fix merkle_ledger_tests --- src/lib/merkle_ledger_tests/test_mask.ml | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/lib/merkle_ledger_tests/test_mask.ml b/src/lib/merkle_ledger_tests/test_mask.ml index 4d59d339c50e..eea869a57530 100644 --- a/src/lib/merkle_ledger_tests/test_mask.ml +++ b/src/lib/merkle_ledger_tests/test_mask.ml @@ -716,13 +716,18 @@ module Make_maskable_and_mask_with_depth (Depth : Depth_S) = struct and type hash := Hash.t and type unattached_mask := Mask.t and type attached_mask := Mask.Attached.t - and type t := Base.t = Merkle_mask.Maskable_merkle_tree.Make (struct - include Inputs - module Base = Base - module Mask = Mask + and type accumulated_t = Mask.accumulated_t + and type t := Base.t = struct + type accumulated_t = Mask.accumulated_t - let mask_to_base m = Any_base.cast (module Mask.Attached) m - end) + include Merkle_mask.Maskable_merkle_tree.Make (struct + include Inputs + module Base = Base + module Mask = Mask + + let mask_to_base m = Any_base.cast (module Mask.Attached) m + end) + end (* test runner *) let with_instances f = From 3dba63a0f599cc198e3746ec96e0b06e22057fd4 Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 30 Nov 2023 11:31:13 +0100 Subject: [PATCH 088/119] Fix bug in Mask's copy --- src/lib/merkle_mask/masking_merkle_tree.ml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 1049357c5337..12140f7c7f6e 100644 
--- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -624,16 +624,18 @@ module Make (Inputs : Inputs_intf.S) = struct (* copy tables in t; use same parent *) let copy t = + let detached_parent_signal = Async.Ivar.create () in { uuid = Uuid_unix.create () ; parent = Ok (get_parent t) - ; detached_parent_signal = Async.Ivar.create () + ; detached_parent_signal ; current_location = t.current_location ; depth = t.depth ; maps = maps_copy t.maps ; accumulated = Option.map t.accumulated ~f:(fun acc -> - { acc with - next = maps_copy acc.next + { base = acc.base + ; detached_next_signal = detached_parent_signal + ; next = maps_copy acc.next ; current = maps_copy acc.current } ) } From d05242cf7fb4b2a2dc0a83a093f0095d63a1aac9 Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 30 Nov 2023 11:33:29 +0100 Subject: [PATCH 089/119] Nit: remove Fn.compose Fn.id --- src/lib/mina_base/ledger_hash.ml | 2 +- src/lib/mina_base/pending_coinbase.ml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/mina_base/ledger_hash.ml b/src/lib/mina_base/ledger_hash.ml index 3862ae47cf39..0ae6909ab4de 100644 --- a/src/lib/mina_base/ledger_hash.ml +++ b/src/lib/mina_base/ledger_hash.ml @@ -45,7 +45,7 @@ let empty_hash = of_hash Outside_hash_image.t let%bench "Ledger_hash.merge ~height:1 empty_hash empty_hash" = merge ~height:1 empty_hash empty_hash -let of_digest = Fn.compose Fn.id of_hash +let of_digest = of_hash type path = Random_oracle.Digest.t list diff --git a/src/lib/mina_base/pending_coinbase.ml b/src/lib/mina_base/pending_coinbase.ml index dc8639181ba2..44996128dbef 100644 --- a/src/lib/mina_base/pending_coinbase.ml +++ b/src/lib/mina_base/pending_coinbase.ml @@ -396,7 +396,7 @@ module Make_str (A : Wire_types.Concrete) = struct let empty_hash = Random_oracle.(digest (salt "PendingCoinbaseMerkleTree")) |> of_hash - let of_digest = Fn.compose Fn.id of_hash + let of_digest = of_hash end module Update = struct From 
a2e68ac7c7cefc4c91f45d08054bea1241220059 Mon Sep 17 00:00:00 2001 From: georgeee Date: Fri, 1 Dec 2023 23:41:45 +0100 Subject: [PATCH 090/119] Fix couple of bugs in reparenting --- src/lib/merkle_mask/masking_merkle_tree.ml | 27 +++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 12140f7c7f6e..493a853f9710 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -49,6 +49,16 @@ module Make (Inputs : Inputs_intf.S) = struct let maps_copy { accounts; token_owners; hashes; locations } = { accounts; token_owners; hashes; locations } + (** Merges second maps object into the first one, + potentially overwriting some keys *) + let maps_merge base { accounts; token_owners; hashes; locations } = + let combine ~key:_ _ v = v in + base.accounts <- Map.merge_skewed ~combine base.accounts accounts ; + base.token_owners <- + Map.merge_skewed ~combine base.token_owners token_owners ; + base.hashes <- Map.merge_skewed ~combine base.hashes hashes ; + base.locations <- Map.merge_skewed ~combine base.locations locations + type accumulated_t = { current : maps_t ; next : maps_t @@ -174,7 +184,7 @@ module Make (Inputs : Inputs_intf.S) = struct if Async.Ivar.is_full detached_next_signal then t.accumulated <- Some - { next = empty_maps () + { next = t.maps ; current = next ; detached_next_signal = t.detached_parent_signal ; base @@ -983,13 +993,24 @@ module Make (Inputs : Inputs_intf.S) = struct let location_of_sexp = Location.t_of_sexp end - let set_parent ?accumulated t parent = + let set_parent ?accumulated:accumulated_opt t parent = assert (Result.is_error t.parent) ; assert (Option.is_none (Async.Ivar.peek t.detached_parent_signal)) ; assert (Int.equal t.depth (Base.depth parent)) ; t.parent <- Ok parent ; t.current_location <- Attached.last_filled t ; - t.accumulated <- accumulated ; + (* If [t.accumulated] 
isn't empty, then this mask had a parent before + and now we just reparent it (which may only happen if both old and new parents + have the same merkle root (and some masks in between may have been removed), + hence no need to modify [t.accumulated]) *) + ( match accumulated_opt with + | Some { current; next; base; detached_next_signal } + when Option.is_none t.accumulated -> + maps_merge current t.maps ; + maps_merge next t.maps ; + t.accumulated <- Some { current; next; base; detached_next_signal } + | _ -> + () ) ; t let addr_to_location addr = Location.Account addr From 4c97874e9e9046834163caf672cfbf0cb2d25078 Mon Sep 17 00:00:00 2001 From: georgeee Date: Sat, 2 Dec 2023 01:28:37 +0100 Subject: [PATCH 091/119] Avoid force-evaluation-induced bug --- src/lib/mina_ledger/ledger.ml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/mina_ledger/ledger.ml b/src/lib/mina_ledger/ledger.ml index ff5a12bdbe65..f4fd61a37ff9 100644 --- a/src/lib/mina_ledger/ledger.ml +++ b/src/lib/mina_ledger/ledger.ml @@ -271,9 +271,9 @@ module Ledger_inner = struct let packed t = Any_ledger.cast (module Mask.Attached) t - let register_mask t = + let register_mask t mask = let accumulated = Mask.Attached.to_accumulated t in - Maskable.register_mask ~accumulated (packed t) + Maskable.register_mask ~accumulated (packed t) mask let unsafe_preload_accounts_from_parent = Maskable.unsafe_preload_accounts_from_parent From b6d0391565a5a96578cd2b24f8cc40fc5f2c40cb Mon Sep 17 00:00:00 2001 From: Tang Jiawei Date: Mon, 4 Dec 2023 18:08:22 +0800 Subject: [PATCH 092/119] fix the order of unset_parent when unregister_mask --- src/lib/merkle_ledger/database.ml | 4 +++- src/lib/merkle_mask/maskable_merkle_tree.ml | 12 ++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 4e0ff0385371..863fcbcdf54a 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml 
@@ -670,7 +670,9 @@ module Make (Inputs : Inputs_intf) : List.map locations ~f:Location.merkle_path_dependencies_exn in let all_locs = - List.map list_of_dependencies ~f:(fun deps -> List.map ~f:fst deps |> expand_query) |> List.concat + List.map list_of_dependencies ~f:(fun deps -> + List.map ~f:fst deps |> expand_query ) + |> List.concat in let hashes = get_hash_batch_exn mdb all_locs in snd @@ List.fold_map ~init:hashes ~f:compute_path list_of_dependencies diff --git a/src/lib/merkle_mask/maskable_merkle_tree.ml b/src/lib/merkle_mask/maskable_merkle_tree.ml index ea0e741d41b7..536f63b6b116 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree.ml +++ b/src/lib/merkle_mask/maskable_merkle_tree.ml @@ -161,8 +161,8 @@ module Make (Inputs : Inputs_intf) = struct List.iter (Hashtbl.find registered_masks uuid |> Option.value ~default:[]) ~f:(fun child_mask -> - if f child_mask then - iter_descendants ~f (Mask.Attached.get_uuid child_mask) ) + iter_descendants ~f (Mask.Attached.get_uuid child_mask) ; + f child_mask ) let unregister_mask_error_msg ~uuid ~parent_uuid suffix = sprintf "Couldn't unregister mask with UUID %s from parent %s, %s" @@ -212,10 +212,10 @@ module Make (Inputs : Inputs_intf) = struct (* You must not retain any references to children of the mask we're unregistering if you pass `Recursive, so this is only used in with_ephemeral_ledger. 
*) - iter_descendants uuid - ~f: - ( Fn.compose (Fn.const true) - @@ unregister_mask_exn_do ~trigger_signal:true ~loc ) ; + iter_descendants uuid ~f:(fun mask -> + ignore + ( unregister_mask_exn_do ~trigger_signal:true ~loc mask + : Mask.unattached ) ) ; true in unregister_mask_exn_do ~trigger_signal ~loc mask From bbe1860dff199f0a797233d227e887f1e1f8aaee Mon Sep 17 00:00:00 2001 From: Tang Jiawei Date: Tue, 5 Dec 2023 03:15:29 +0800 Subject: [PATCH 093/119] remove `remove_accounts_exn` --- src/lib/snarkyjs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/snarkyjs b/src/lib/snarkyjs index 9042677986ac..8530e25a4c1d 160000 --- a/src/lib/snarkyjs +++ b/src/lib/snarkyjs @@ -1 +1 @@ -Subproject commit 9042677986ac3d2465aab75a228f089972c6cdb6 +Subproject commit 8530e25a4c1dd05d4740cc303ba0c155f43ac486 From 6a5b1123740c72328f4842c71c9c176318b89dfd Mon Sep 17 00:00:00 2001 From: Steven Platt <31355889+stevenplatt@users.noreply.github.com> Date: Mon, 4 Dec 2023 14:56:03 -0500 Subject: [PATCH 094/119] ci test repairs --- buildkite/scripts/connect-to-berkeley.sh | 4 ++-- buildkite/scripts/connect-to-mainnet-on-compatible.sh | 4 ++-- helm/archive-node/templates/db-bootstrap.yaml | 6 ++++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/buildkite/scripts/connect-to-berkeley.sh b/buildkite/scripts/connect-to-berkeley.sh index bc991d645730..ee508e7b2878 100755 --- a/buildkite/scripts/connect-to-berkeley.sh +++ b/buildkite/scripts/connect-to-berkeley.sh @@ -43,10 +43,10 @@ mina daemon \ --libp2p-keypair "/root/libp2p-keys/key" \ & # -background -# Attempt to connect to the GraphQL client every 10s for up to 4 minutes +# Attempt to connect to the GraphQL client every 30s for up to 12 minutes num_status_retries=24 for ((i=1;i<=$num_status_retries;i++)); do - sleep 10s + sleep 30s set +e mina client status status_exit_code=$? 
diff --git a/buildkite/scripts/connect-to-mainnet-on-compatible.sh b/buildkite/scripts/connect-to-mainnet-on-compatible.sh index 982e78923ff1..17fe04fe4577 100755 --- a/buildkite/scripts/connect-to-mainnet-on-compatible.sh +++ b/buildkite/scripts/connect-to-mainnet-on-compatible.sh @@ -37,10 +37,10 @@ mina daemon \ & # -background -# Attempt to connect to the GraphQL client every 10s for up to 4 minutes +# Attempt to connect to the GraphQL client every 30s for up to 12 minutes num_status_retries=24 for ((i=1;i<=$num_status_retries;i++)); do - sleep 10s + sleep 30s set +e mina client status status_exit_code=$? diff --git a/helm/archive-node/templates/db-bootstrap.yaml b/helm/archive-node/templates/db-bootstrap.yaml index 8267a0cff7db..0735a8bb2671 100644 --- a/helm/archive-node/templates/db-bootstrap.yaml +++ b/helm/archive-node/templates/db-bootstrap.yaml @@ -9,7 +9,7 @@ spec: containers: {{- if .Values.archive.initFromDump }} - name: import-dump - image: gcr.io/o1labs-192920/postgresql-curl:latest + image: postgres:15-alpine env: - name: PGPASSWORD valueFrom: @@ -19,6 +19,7 @@ spec: command: ["bash", "-c"] args: - 'sleep 30 + && apk add curl && cd /tmp && curl https://storage.googleapis.com/mina-archive-dumps/{{ .Values.testnetName }}-archive-dump-$(date -Idate)_0000.sql.tar.gz -o {{ .Values.testnetName }}-archive-dump.tar.gz && tar -xvf {{ .Values.testnetName }}-archive-dump.tar.gz @@ -37,7 +38,7 @@ spec: -c "ALTER DATABASE {{ .Values.postgresql.auth.database }} SET DEFAULT_TRANSACTION_ISOLATION TO SERIALIZABLE;"' {{- else }} - name: import-schema - image: gcr.io/o1labs-192920/postgresql-curl:latest + image: postgres:15-alpine env: - name: PGPASSWORD valueFrom: @@ -47,6 +48,7 @@ spec: command: ["bash", "-c"] args: - 'sleep 30 + && apk add curl && cd /tmp && {{ range .Values.archive.remoteSchemaAuxFiles }} curl -O {{.}} && {{ end }} psql From 2be17633b9177e03b020a5e7a5c4e6ac38ed2172 Mon Sep 17 00:00:00 2001 From: georgeee Date: Tue, 5 Dec 2023 01:01:51 +0100 
Subject: [PATCH 095/119] Safeguard against unexpected parent update Problem: accumulated solution is heavily relied upon assumption of immutability of a parent ledger/mask. If this assumption is accidentally broken, this might be problematic. Solution: when a conflicting update to parent is detected, log it and reset the accumulated state all down the ancestry. --- src/lib/merkle_mask/dune | 2 + src/lib/merkle_mask/maskable_merkle_tree.ml | 32 ++++++++++++--- src/lib/merkle_mask/masking_merkle_tree.ml | 41 ++++++++++++------- .../merkle_mask/masking_merkle_tree_intf.ml | 6 +++ 4 files changed, 61 insertions(+), 20 deletions(-) diff --git a/src/lib/merkle_mask/dune b/src/lib/merkle_mask/dune index 02538f4c1322..bcf75bb1cde7 100644 --- a/src/lib/merkle_mask/dune +++ b/src/lib/merkle_mask/dune @@ -24,9 +24,11 @@ mina_stdlib direction empty_hashes + logger ) (preprocess (pps + ppx_mina ppx_compare ppx_deriving.show ppx_deriving_yojson diff --git a/src/lib/merkle_mask/maskable_merkle_tree.ml b/src/lib/merkle_mask/maskable_merkle_tree.ml index 536f63b6b116..03cd755ee93c 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree.ml +++ b/src/lib/merkle_mask/maskable_merkle_tree.ml @@ -25,6 +25,8 @@ module Make (Inputs : Inputs_intf) = struct open Inputs include Base + let logger = Logger.create () + (** Maps parent ledger UUIDs to child masks. 
*) let (registered_masks : Mask.Attached.t list Uuid.Table.t) = Uuid.Table.create () @@ -223,12 +225,22 @@ module Make (Inputs : Inputs_intf) = struct (** a set calls the Base implementation set, notifies registered mask childen *) let set t location account = Base.set t location account ; - match Uuid.Table.find registered_masks (get_uuid t) with + let uuid = get_uuid t in + match Uuid.Table.find registered_masks uuid with | None -> () | Some masks -> List.iter masks ~f:(fun mask -> - Mask.Attached.parent_set_notify mask account ) + if not (Mask.Attached.is_committing mask) then ( + Mask.Attached.parent_set_notify mask account ; + let child_uuid = Mask.Attached.get_uuid mask in +Mask.Attached.drop_accumulated mask; + iter_descendants child_uuid ~f:Mask.Attached.drop_accumulated ; + [%log error] + "Update of an account in parent %s conflicted with an account \ + in mask %s" + (Uuid.to_string_hum uuid) + (Uuid.to_string_hum child_uuid) ) ) let remove_and_reparent_exn t t_as_mask = let parent = Mask.Attached.get_parent t_as_mask in @@ -248,13 +260,23 @@ module Make (Inputs : Inputs_intf) = struct ignore (register_mask parent m : Mask.Attached.t) ) let batch_notify_mask_children t accounts = - match Uuid.Table.find registered_masks (get_uuid t) with + let uuid = get_uuid t in + match Uuid.Table.find registered_masks uuid with | None -> () | Some masks -> List.iter masks ~f:(fun mask -> - List.iter accounts ~f:(fun account -> - Mask.Attached.parent_set_notify mask account ) ) + if not (Mask.Attached.is_committing mask) then ( + let child_uuid = Mask.Attached.get_uuid mask in +Mask.Attached.drop_accumulated mask; + iter_descendants child_uuid ~f:Mask.Attached.drop_accumulated ; + [%log error] + "Update of an account in parent %s conflicted with an account \ + in mask %s" + (Uuid.to_string_hum uuid) + (Uuid.to_string_hum child_uuid) ; + List.iter accounts ~f:(fun account -> + Mask.Attached.parent_set_notify mask account ) ) ) let set_batch t locations_and_accounts = 
Base.set_batch t locations_and_accounts ; diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 493a853f9710..1066bedf873d 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -80,6 +80,7 @@ module Make (Inputs : Inputs_intf.S) = struct and for a few ancestors. This is used as a lookup cache. *) ; mutable accumulated : (accumulated_t[@sexp.opaque]) option + ; mutable is_committing : bool } [@@deriving sexp] @@ -100,6 +101,7 @@ module Make (Inputs : Inputs_intf.S) = struct ; depth ; accumulated = None ; maps = empty_maps () + ; is_committing = false } let get_uuid { uuid; _ } = uuid @@ -493,7 +495,6 @@ module Make (Inputs : Inputs_intf.S) = struct Base.merkle_root ancestor let remove_account_and_update_hashes t location = - t.accumulated <- None ; (* remove account and key from tables *) let account = Option.value_exn (Map.find t.maps.accounts location) in t.maps.accounts <- Map.remove t.maps.accounts location ; @@ -550,19 +551,22 @@ module Make (Inputs : Inputs_intf.S) = struct self_set_hash t addr hash ) (* if the mask's parent sets an account, we can prune an entry in the mask - if the account in the parent is the same in the mask *) + if the account in the parent is the same in the mask + + returns true is the mask is in the state of being comitted *) let parent_set_notify t account = assert_is_attached t ; - match Map.find t.maps.locations (Account.identifier account) with - | None -> - () - | Some location -> ( - match Map.find t.maps.accounts location with - | Some existing_account -> - if Account.equal account existing_account then - remove_account_and_update_hashes t location - | None -> - () ) + Option.value ~default:() + @@ let%bind.Option location = + Map.find t.maps.locations (Account.identifier account) + in + let%bind.Option existing_account = Map.find t.maps.accounts location in + let%map.Option () = + Option.some_if (Account.equal account 
existing_account) () + in + remove_account_and_update_hashes t location + + let is_committing t = t.is_committing (* as for accounts, we see if we have it in the mask, else delegate to parent *) @@ -614,14 +618,17 @@ module Make (Inputs : Inputs_intf.S) = struct (* transfer state from mask to parent; flush local state *) let commit t = + assert (not t.is_committing) ; + t.is_committing <- true ; assert_is_attached t ; let parent = get_parent t in let old_root_hash = merkle_root t in let account_data = Map.to_alist t.maps.accounts in - Base.set_batch parent account_data ; t.maps.accounts <- Location_binable.Map.empty ; t.maps.hashes <- Addr.Map.empty ; - (* TODO why only 2/4 maps are updated ? *) + t.maps.locations <- Account_id.Map.empty ; + t.maps.token_owners <- Token_id.Map.empty ; + Base.set_batch parent account_data ; Debug_assert.debug_assert (fun () -> [%test_result: Hash.t] ~message: @@ -630,7 +637,8 @@ module Make (Inputs : Inputs_intf.S) = struct ~expect:old_root_hash (Base.merkle_root parent) ; [%test_result: Hash.t] ~message:"Merkle root of the mask should delegate to the parent now" - ~expect:(merkle_root t) (Base.merkle_root parent) ) + ~expect:(merkle_root t) (Base.merkle_root parent) ) ; + t.is_committing <- false (* copy tables in t; use same parent *) let copy t = @@ -648,6 +656,7 @@ module Make (Inputs : Inputs_intf.S) = struct ; next = maps_copy acc.next ; current = maps_copy acc.current } ) + ; is_committing = false } let last_filled t = @@ -673,6 +682,8 @@ module Make (Inputs : Inputs_intf.S) = struct "last_filled: expected account locations for the parent \ and mask" ) ) + let drop_accumulated t = t.accumulated <- None + include Merkle_ledger.Util.Make (struct module Location = Location module Location_binable = Location_binable diff --git a/src/lib/merkle_mask/masking_merkle_tree_intf.ml b/src/lib/merkle_mask/masking_merkle_tree_intf.ml index a88ace610186..cf4a54df36fa 100644 --- a/src/lib/merkle_mask/masking_merkle_tree_intf.ml +++ 
b/src/lib/merkle_mask/masking_merkle_tree_intf.ml @@ -89,6 +89,12 @@ module type S = sig val to_accumulated : t -> accumulated_t + (** Drop accumulated structure, a method used in safeguard against + unwanted modification of ancestor's mask *) + val drop_accumulated : t -> unit + + val is_committing : t -> bool + (** already have module For_testing from include above *) module For_testing : sig val location_in_mask : t -> location -> bool From bacfd17277641c8accf83a8f50b97fb04606a2d7 Mon Sep 17 00:00:00 2001 From: georgeee Date: Tue, 5 Dec 2023 01:42:55 +0100 Subject: [PATCH 096/119] Actualize accumulated in to_accumulated --- src/lib/merkle_mask/masking_merkle_tree.ml | 42 ++++++++++++---------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 1066bedf873d..aa1c958b7ab0 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -133,23 +133,6 @@ module Make (Inputs : Inputs_intf.S) = struct Dangling_parent_reference of Uuid.t * (* Location where null was set*) string - let to_accumulated t = - match (t.accumulated, t.parent) with - | Some { base; detached_next_signal; next; current }, _ -> - { base - ; detached_next_signal - ; next = maps_copy next - ; current = maps_copy current - } - | None, Ok base -> - { base - ; next = maps_copy t.maps - ; current = maps_copy t.maps - ; detached_next_signal = t.detached_parent_signal - } - | None, Error loc -> - raise (Dangling_parent_reference (t.uuid, loc)) - let create () = failwith "Mask.Attached.create: cannot create an attached mask; use Mask.create \ @@ -180,7 +163,7 @@ module Make (Inputs : Inputs_intf.S) = struct let get_parent ({ parent = opt; _ } as t) = assert_is_attached t ; Result.ok_or_failwith opt - let maps_and_ancestor t = + let actualize_accumulated t = Option.iter t.accumulated ~f:(fun { detached_next_signal; next; base; current = _ } -> if 
Async.Ivar.is_full detached_next_signal then @@ -190,13 +173,34 @@ module Make (Inputs : Inputs_intf.S) = struct ; current = next ; detached_next_signal = t.detached_parent_signal ; base - } ) ; + } ) + + let maps_and_ancestor t = + actualize_accumulated t ; match t.accumulated with | Some { current; base; _ } -> (current, base) | None -> (t.maps, get_parent t) + let to_accumulated t = + actualize_accumulated t ; + match (t.accumulated, t.parent) with + | Some { base; detached_next_signal; next; current }, _ -> + { base + ; detached_next_signal + ; next = maps_copy next + ; current = maps_copy current + } + | None, Ok base -> + { base + ; next = maps_copy t.maps + ; current = maps_copy t.maps + ; detached_next_signal = t.detached_parent_signal + } + | None, Error loc -> + raise (Dangling_parent_reference (t.uuid, loc)) + let get_uuid t = assert_is_attached t ; t.uuid let get_directory t = From d325d03a1f44432d9edc7f93a7d29ba6dae26a70 Mon Sep 17 00:00:00 2001 From: georgeee Date: Tue, 5 Dec 2023 02:04:48 +0100 Subject: [PATCH 097/119] Add comments about accumulated structure --- src/lib/merkle_mask/masking_merkle_tree.ml | 34 ++++++++++++++++++++-- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index aa1c958b7ab0..f351dee111ca 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -59,13 +59,35 @@ module Make (Inputs : Inputs_intf.S) = struct base.hashes <- Map.merge_skewed ~combine base.hashes hashes ; base.locations <- Map.merge_skewed ~combine base.locations locations + (** Structure managing cache accumulated since the "base" ledger. + + Its purpose is to optimize lookups through a few consequitive masks + (by using just one map lookup instead of [O(number of masks)] map lookups). + + With a number of mask around 290, this trick gives a sizeable performance improvement. 
+ + Accumulator is inherited from parent mask if [set_parent ~accumulated] of a child + is called with [to_acumulated t] of the parent mask. + + Structure maintains two caches: [current] and [next], with the former + being always a superset of a latter and [next] always being superset of mask's contents + from [maps] field. These two caches are being rotated according to a certain rule + to ensure that no much more memory is used within accumulator as compared to the case + when [accumulated = None] for all masks. + + Garbage-collection/rotation mechanism for [next] and [current] is based on idea to set + [current] to [next] and [next] to [t.maps] when the mask at which accumulation of [next] started + became detached. *) type accumulated_t = { current : maps_t + (** Currently used cache: contains a superset of contents of masks from base ledger to the current mask *) ; next : maps_t - ; base : Base.t + (** Cache that will be used after the current cache is garbage-collected *) + ; base : Base.t (** Base ledger *) ; detached_next_signal : Detached_parent_signal.t - (* Ivar for mask from which next was started being built - When it's fulfilled, "next" becomes "current". + (** Ivar for mask from which next was started being built. + When it's fulfilled, [next] becomes [current] (because next contains superset of all masks from [baser], + [detached_signal] is reset to the current mask and [next] is set to contents of the current mask. *) } @@ -163,6 +185,8 @@ module Make (Inputs : Inputs_intf.S) = struct let get_parent ({ parent = opt; _ } as t) = assert_is_attached t ; Result.ok_or_failwith opt + (** Check whether mask from which we started computing the [next] + accumulator is detached and [current] can be garbage-collected. 
*) let actualize_accumulated t = Option.iter t.accumulated ~f:(fun { detached_next_signal; next; base; current = _ } -> @@ -175,6 +199,9 @@ module Make (Inputs : Inputs_intf.S) = struct ; base } ) + (** When [accumulated] is not configured, returns current [t.maps] and parent. + + Otherwise, returns the [current] accumulator and [base]. *) let maps_and_ancestor t = actualize_accumulated t ; match t.accumulated with @@ -183,6 +210,7 @@ module Make (Inputs : Inputs_intf.S) = struct | None -> (t.maps, get_parent t) + (** Either copies accumulated or initializes it with the parent being used as the [base]. *) let to_accumulated t = actualize_accumulated t ; match (t.accumulated, t.parent) with From 7fdb2a874a7b33336863b1948c27645663d4b958 Mon Sep 17 00:00:00 2001 From: georgeee Date: Tue, 5 Dec 2023 02:05:18 +0100 Subject: [PATCH 098/119] Revert "Fix bug in Mask's copy" Reverts a faulty change (original version was correct). This reverts commit 3dba63a0f599cc198e3746ec96e0b06e22057fd4. --- src/lib/merkle_mask/masking_merkle_tree.ml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index f351dee111ca..eadc747687d8 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -674,18 +674,16 @@ module Make (Inputs : Inputs_intf.S) = struct (* copy tables in t; use same parent *) let copy t = - let detached_parent_signal = Async.Ivar.create () in { uuid = Uuid_unix.create () ; parent = Ok (get_parent t) - ; detached_parent_signal + ; detached_parent_signal = Async.Ivar.create () ; current_location = t.current_location ; depth = t.depth ; maps = maps_copy t.maps ; accumulated = Option.map t.accumulated ~f:(fun acc -> - { base = acc.base - ; detached_next_signal = detached_parent_signal - ; next = maps_copy acc.next + { acc with + next = maps_copy acc.next ; current = maps_copy acc.current } ) ; is_committing = false 
From 7b88587cfb3b7c11c04cccb1579d70d588a93932 Mon Sep 17 00:00:00 2001 From: dkijania Date: Thu, 2 Nov 2023 09:56:46 +0100 Subject: [PATCH 099/119] Connect to testworld-2-0 instead of berkeley for connectivity tests --- buildkite/scripts/connect-to-berkeley.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/buildkite/scripts/connect-to-berkeley.sh b/buildkite/scripts/connect-to-berkeley.sh index ee508e7b2878..b0cf04135543 100755 --- a/buildkite/scripts/connect-to-berkeley.sh +++ b/buildkite/scripts/connect-to-berkeley.sh @@ -16,16 +16,17 @@ export DEBIAN_FRONTEND=noninteractive apt-get update apt-get install -y git apt-transport-https ca-certificates tzdata curl -TESTNET_NAME="berkeley" +TESTNET_VERSION_NAME="berkeley" +TESTNET_NAME="testworld-2-0" git config --global --add safe.directory /workdir source buildkite/scripts/export-git-env-vars.sh -echo "Installing mina daemon package: mina-${TESTNET_NAME}=${MINA_DEB_VERSION}" +echo "Installing mina daemon package: mina-${TESTNET_VERSION_NAME}=${MINA_DEB_VERSION}" echo "deb [trusted=yes] http://packages.o1test.net $MINA_DEB_CODENAME $MINA_DEB_RELEASE" | tee /etc/apt/sources.list.d/mina.list apt-get update -apt-get install --allow-downgrades -y "mina-${TESTNET_NAME}=${MINA_DEB_VERSION}" +apt-get install --allow-downgrades -y "mina-${TESTNET_VERSION_NAME}=${MINA_DEB_VERSION}" # Remove lockfile if present rm ~/.mina-config/.mina-lock ||: From edd6473eef6a7003f0a3a9c43beaee191638197a Mon Sep 17 00:00:00 2001 From: dkijania Date: Tue, 28 Nov 2023 22:10:45 +0100 Subject: [PATCH 100/119] rename testname --- buildkite/scripts/connect-to-testworld-2-0.sh | 71 +++++++++++++++++++ buildkite/src/Command/ConnectToTestnet.dhall | 6 +- 2 files changed, 74 insertions(+), 3 deletions(-) create mode 100755 buildkite/scripts/connect-to-testworld-2-0.sh diff --git a/buildkite/scripts/connect-to-testworld-2-0.sh b/buildkite/scripts/connect-to-testworld-2-0.sh new file mode 100755 index 000000000000..7b6386ace55c 
--- /dev/null +++ b/buildkite/scripts/connect-to-testworld-2-0.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +set -eo pipefail + +case "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" in + rampup|berkeley|release/2.0.0|develop) + ;; + *) + echo "Not pulling against rampup, not running the connect test" + exit 0 ;; +esac + +# Don't prompt for answers during apt-get install +export DEBIAN_FRONTEND=noninteractive + +apt-get update +apt-get install -y git apt-transport-https ca-certificates tzdata curl + +TESTNET_VERSION_NAME="berkeley" +TESTNET_NAME="testworld-2-0" + +git config --global --add safe.directory /workdir + +source buildkite/scripts/export-git-env-vars.sh + +echo "Installing mina daemon package: mina-${TESTNET_VERSION_NAME}=${MINA_DEB_VERSION}" +echo "deb [trusted=yes] http://packages.o1test.net $MINA_DEB_CODENAME $MINA_DEB_RELEASE" | tee /etc/apt/sources.list.d/mina.list +apt-get update +apt-get install --allow-downgrades -y "mina-${TESTNET_VERSION_NAME}=${MINA_DEB_VERSION}" + +# Remove lockfile if present +rm ~/.mina-config/.mina-lock ||: + +mkdir -p /root/libp2p-keys/ +# Pre-generated random password for this quick test +export MINA_LIBP2P_PASS=eithohShieshichoh8uaJ5iefo1reiRudaekohG7AeCeib4XuneDet2uGhu7lahf +mina libp2p generate-keypair --privkey-path /root/libp2p-keys/key +# Set permissions on the keypair so the daemon doesn't complain +chmod -R 0700 /root/libp2p-keys/ + +# Restart in the background +mina daemon \ + --peer-list-url "https://storage.googleapis.com/seed-lists/${TESTNET_NAME}_seeds.txt" \ + --libp2p-keypair "/root/libp2p-keys/key" \ +& # -background + +# Attempt to connect to the GraphQL client every 10s for up to 4 minutes +num_status_retries=24 +for ((i=1;i<=$num_status_retries;i++)); do + sleep 10s + set +e + mina client status + status_exit_code=$? 
+ set -e + if [ $status_exit_code -eq 0 ]; then + break + elif [ $i -eq $num_status_retries ]; then + exit $status_exit_code + fi +done + +# Check that the daemon has connected to peers and is still up after 2 mins +sleep 5m +mina client status +if [ $(mina advanced get-peers | wc -l) -gt 0 ]; then + echo "Found some peers" +else + echo "No peers found" + exit 1 +fi + diff --git a/buildkite/src/Command/ConnectToTestnet.dhall b/buildkite/src/Command/ConnectToTestnet.dhall index 821bdbf106fd..41e641910a21 100644 --- a/buildkite/src/Command/ConnectToTestnet.dhall +++ b/buildkite/src/Command/ConnectToTestnet.dhall @@ -17,11 +17,11 @@ let Cmd = ../Lib/Cmds.dhall in Cmd.Docker::{ image = (../Constants/ContainerImages.dhall).ubuntu2004 } - "./buildkite/scripts/connect-to-berkeley.sh" + "./buildkite/scripts/connect-to-testworld-2-0.sh" ], - label = "Connect to Berkeley", + label = "Connect to Testworld 2.0", soft_fail = Some (B/SoftFail.Boolean True), - key = "connect-to-berkeley", + key = "connect-to-testworld-2-0", target = Size.Large, depends_on = dependsOn } From 31195c5e86a196ce56e280b252db8b04a2f8e6ef Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 11:01:24 +0100 Subject: [PATCH 101/119] extend waiting for connectio a bit --- buildkite/scripts/connect-to-testworld-2-0.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/buildkite/scripts/connect-to-testworld-2-0.sh b/buildkite/scripts/connect-to-testworld-2-0.sh index 7b6386ace55c..ea380764efbd 100755 --- a/buildkite/scripts/connect-to-testworld-2-0.sh +++ b/buildkite/scripts/connect-to-testworld-2-0.sh @@ -44,10 +44,10 @@ mina daemon \ --libp2p-keypair "/root/libp2p-keys/key" \ & # -background -# Attempt to connect to the GraphQL client every 10s for up to 4 minutes +# Attempt to connect to the GraphQL client every 10s for up to 8 minutes num_status_retries=24 for ((i=1;i<=$num_status_retries;i++)); do - sleep 10s + sleep 20s set +e mina client status status_exit_code=$? 
@@ -60,7 +60,7 @@ for ((i=1;i<=$num_status_retries;i++)); do done # Check that the daemon has connected to peers and is still up after 2 mins -sleep 5m +sleep 2m mina client status if [ $(mina advanced get-peers | wc -l) -gt 0 ]; then echo "Found some peers" From e515e8af15f7eb763eebda5122db4f49a38907cb Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 13:32:34 +0100 Subject: [PATCH 102/119] refactor tesnet scripts --- ...testworld-2-0.sh => connect-to-testnet.sh} | 17 ++++++--- buildkite/src/Command/ConnectToTestnet.dhall | 35 ++++++++++--------- .../src/Jobs/Test/ConnectToBerkeley.dhall | 4 +-- .../Jobs/Test/ConnectToTestworld-2-0.dhall | 27 ++++++++++++++ 4 files changed, 60 insertions(+), 23 deletions(-) rename buildkite/scripts/{connect-to-testworld-2-0.sh => connect-to-testnet.sh} (87%) mode change 100755 => 100644 create mode 100644 buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall diff --git a/buildkite/scripts/connect-to-testworld-2-0.sh b/buildkite/scripts/connect-to-testnet.sh old mode 100755 new mode 100644 similarity index 87% rename from buildkite/scripts/connect-to-testworld-2-0.sh rename to buildkite/scripts/connect-to-testnet.sh index ea380764efbd..ddeacde32e52 --- a/buildkite/scripts/connect-to-testworld-2-0.sh +++ b/buildkite/scripts/connect-to-testnet.sh @@ -2,6 +2,16 @@ set -eo pipefail +if [[ $# -ne 4 ]]; then + echo "Usage: $0 '' ''''" + exit 1 +fi + +TESTNET_VERSION_NAME="berkeley" +TESTNET_NAME=$1 +WAIT_BETWEEN_POLLING_GRAPHQL=$2 +WAIT_AFTER_FINAL_CHECK=$3 + case "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" in rampup|berkeley|release/2.0.0|develop) ;; @@ -16,9 +26,6 @@ export DEBIAN_FRONTEND=noninteractive apt-get update apt-get install -y git apt-transport-https ca-certificates tzdata curl -TESTNET_VERSION_NAME="berkeley" -TESTNET_NAME="testworld-2-0" - git config --global --add safe.directory /workdir source buildkite/scripts/export-git-env-vars.sh @@ -47,7 +54,7 @@ mina daemon \ # Attempt to connect to the GraphQL client every 
10s for up to 8 minutes num_status_retries=24 for ((i=1;i<=$num_status_retries;i++)); do - sleep 20s + sleep $WAIT_BETWEEN_POLLING_GRAPHQL set +e mina client status status_exit_code=$? @@ -60,7 +67,7 @@ for ((i=1;i<=$num_status_retries;i++)); do done # Check that the daemon has connected to peers and is still up after 2 mins -sleep 2m +sleep $WAIT_AFTER_FINAL_CHECK mina client status if [ $(mina advanced get-peers | wc -l) -gt 0 ]; then echo "Found some peers" diff --git a/buildkite/src/Command/ConnectToTestnet.dhall b/buildkite/src/Command/ConnectToTestnet.dhall index 41e641910a21..de89df498a73 100644 --- a/buildkite/src/Command/ConnectToTestnet.dhall +++ b/buildkite/src/Command/ConnectToTestnet.dhall @@ -9,20 +9,23 @@ let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type let Cmd = ../Lib/Cmds.dhall in -{ step = \(dependsOn : List Command.TaggedKey.Type) -> - Command.build - Command.Config::{ - commands = [ - Cmd.runInDocker - Cmd.Docker::{ - image = (../Constants/ContainerImages.dhall).ubuntu2004 - } - "./buildkite/scripts/connect-to-testworld-2-0.sh" - ], - label = "Connect to Testworld 2.0", - soft_fail = Some (B/SoftFail.Boolean True), - key = "connect-to-testworld-2-0", - target = Size.Large, - depends_on = dependsOn - } +{ step = \(dependsOn : List Command.TaggedKey.Type) + -> \(testnet : Text) + -> \(wait_between_graphql_poll : Text) + -> \(wait_before_final_check: Text ) + -> Command.build + Command.Config::{ + commands = [ + Cmd.runInDocker + Cmd.Docker::{ + image = (../Constants/ContainerImages.dhall).ubuntu2004 + } + "./buildkite/scripts/connect-testnet.sh $testnet $wait_between_graphql_poll wait_before_final_check" + ], + label = "Connect to $testnet", + soft_fail = Some (B/SoftFail.Boolean True), + key = "connect-to-$testnet", + target = Size.Large, + depends_on = dependsOn + } } diff --git a/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall b/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall index d6f404bfcf5a..2cef3f0e4cf8 100644 --- 
a/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall +++ b/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall @@ -14,7 +14,7 @@ in Pipeline.build Pipeline.Config::{ JobSpec::{ dirtyWhen = [ S.strictlyStart (S.contains "src"), - S.exactly "buildkite/scripts/connect-to-berkeley" "sh", + S.exactly "buildkite/scripts/connect-to-testnet" "sh", S.exactly "buildkite/src/Jobs/Test/ConnectToBerkeley" "dhall", S.exactly "buildkite/src/Command/ConnectToTestnet" "dhall" ], @@ -22,6 +22,6 @@ in Pipeline.build Pipeline.Config::{ name = "ConnectToBerkeley" }, steps = [ - ConnectToTestnet.step dependsOn + ConnectToTestnet.step dependsOn "berkeley" "40s" "2m" ] } diff --git a/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall b/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall new file mode 100644 index 000000000000..53009104c4ab --- /dev/null +++ b/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall @@ -0,0 +1,27 @@ +let S = ../../Lib/SelectFiles.dhall + +let JobSpec = ../../Pipeline/JobSpec.dhall +let Pipeline = ../../Pipeline/Dsl.dhall + +let ConnectToTestnet = ../../Command/ConnectToTestnet.dhall + +let dependsOn = [ + { name = "MinaArtifactBullseye", key = "daemon-berkeley-bullseye-docker-image" } +] + +in Pipeline.build Pipeline.Config::{ + spec = + JobSpec::{ + dirtyWhen = [ + S.strictlyStart (S.contains "src"), + S.exactly "buildkite/scripts/connect-to-testnet" "sh", + S.exactly "buildkite/src/Jobs/Test/ConnectToTestworld-2-0" "dhall", + S.exactly "buildkite/src/Command/ConnectToTestnet" "dhall" + ], + path = "Test", + name = "ConnectToTestworld-2-0" + }, + steps = [ + ConnectToTestnet.step dependsOn "testworld-2-0" "40s" "2m" + ] +} From 22ac0b565fb26d378dc4ebfc88522492bb077ed7 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 13:56:37 +0100 Subject: [PATCH 103/119] fix env variables --- buildkite/src/Command/ConnectToTestnet.dhall | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/buildkite/src/Command/ConnectToTestnet.dhall 
b/buildkite/src/Command/ConnectToTestnet.dhall index de89df498a73..2474c72da0a3 100644 --- a/buildkite/src/Command/ConnectToTestnet.dhall +++ b/buildkite/src/Command/ConnectToTestnet.dhall @@ -20,11 +20,11 @@ let Cmd = ../Lib/Cmds.dhall in Cmd.Docker::{ image = (../Constants/ContainerImages.dhall).ubuntu2004 } - "./buildkite/scripts/connect-testnet.sh $testnet $wait_between_graphql_poll wait_before_final_check" + "./buildkite/scripts/connect-testnet.sh ${testnet} ${wait_between_graphql_poll} ${wait_before_final_check}" ], - label = "Connect to $testnet", + label = "Connect to ${testnet}", soft_fail = Some (B/SoftFail.Boolean True), - key = "connect-to-$testnet", + key = "connect-to-${testnet}", target = Size.Large, depends_on = dependsOn } From 4a9dfa6fe963a987e1c394924d30e80f43144f03 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 14:02:06 +0100 Subject: [PATCH 104/119] set connection to itn on rampup as hardfail --- buildkite/src/Command/ConnectToTestnet.dhall | 3 ++- buildkite/src/Jobs/Test/ConnectToBerkeley.dhall | 4 ++-- buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/buildkite/src/Command/ConnectToTestnet.dhall b/buildkite/src/Command/ConnectToTestnet.dhall index 2474c72da0a3..6112ef4340bf 100644 --- a/buildkite/src/Command/ConnectToTestnet.dhall +++ b/buildkite/src/Command/ConnectToTestnet.dhall @@ -13,6 +13,7 @@ let Cmd = ../Lib/Cmds.dhall in -> \(testnet : Text) -> \(wait_between_graphql_poll : Text) -> \(wait_before_final_check: Text ) + -> \(soft_fail: B/SoftFail.Boolean) -> Command.build Command.Config::{ commands = [ @@ -23,7 +24,7 @@ let Cmd = ../Lib/Cmds.dhall in "./buildkite/scripts/connect-testnet.sh ${testnet} ${wait_between_graphql_poll} ${wait_before_final_check}" ], label = "Connect to ${testnet}", - soft_fail = Some (B/SoftFail.Boolean True), + soft_fail = Some soft_fail, key = "connect-to-${testnet}", target = Size.Large, depends_on = dependsOn diff 
--git a/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall b/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall index 2cef3f0e4cf8..968bbe2dd36c 100644 --- a/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall +++ b/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall @@ -1,5 +1,5 @@ let S = ../../Lib/SelectFiles.dhall - +let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type let JobSpec = ../../Pipeline/JobSpec.dhall let Pipeline = ../../Pipeline/Dsl.dhall @@ -22,6 +22,6 @@ in Pipeline.build Pipeline.Config::{ name = "ConnectToBerkeley" }, steps = [ - ConnectToTestnet.step dependsOn "berkeley" "40s" "2m" + ConnectToTestnet.step dependsOn "berkeley" "40s" "2m" (B/SoftFail.Boolean True) ] } diff --git a/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall b/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall index 53009104c4ab..27eb31435155 100644 --- a/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall +++ b/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall @@ -1,4 +1,5 @@ let S = ../../Lib/SelectFiles.dhall +let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type let JobSpec = ../../Pipeline/JobSpec.dhall let Pipeline = ../../Pipeline/Dsl.dhall @@ -22,6 +23,6 @@ in Pipeline.build Pipeline.Config::{ name = "ConnectToTestworld-2-0" }, steps = [ - ConnectToTestnet.step dependsOn "testworld-2-0" "40s" "2m" + ConnectToTestnet.step dependsOn "testworld-2-0" "40s" "2m" (B/SoftFail.Boolean False) ] } From cf97c75a6fd6f338c80878c07ddcc5f2d3bfdd8a Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 15:20:57 +0100 Subject: [PATCH 105/119] fix path --- buildkite/src/Command/ConnectToTestnet.dhall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildkite/src/Command/ConnectToTestnet.dhall b/buildkite/src/Command/ConnectToTestnet.dhall index 6112ef4340bf..80f91a28389c 100644 --- a/buildkite/src/Command/ConnectToTestnet.dhall +++ b/buildkite/src/Command/ConnectToTestnet.dhall @@ -21,7 +21,7 @@ let Cmd = ../Lib/Cmds.dhall in 
Cmd.Docker::{ image = (../Constants/ContainerImages.dhall).ubuntu2004 } - "./buildkite/scripts/connect-testnet.sh ${testnet} ${wait_between_graphql_poll} ${wait_before_final_check}" + "./buildkite/scripts/connect-to-testnet.sh ${testnet} ${wait_between_graphql_poll} ${wait_before_final_check}" ], label = "Connect to ${testnet}", soft_fail = Some soft_fail, From ec4556ffeafa8f7c1379e4acaf0c5b5adda2a963 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 15:39:06 +0100 Subject: [PATCH 106/119] fix connect to testnet command --- buildkite/src/Command/ConnectToTestnet.dhall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildkite/src/Command/ConnectToTestnet.dhall b/buildkite/src/Command/ConnectToTestnet.dhall index 80f91a28389c..7fc32580a5b3 100644 --- a/buildkite/src/Command/ConnectToTestnet.dhall +++ b/buildkite/src/Command/ConnectToTestnet.dhall @@ -13,7 +13,7 @@ let Cmd = ../Lib/Cmds.dhall in -> \(testnet : Text) -> \(wait_between_graphql_poll : Text) -> \(wait_before_final_check: Text ) - -> \(soft_fail: B/SoftFail.Boolean) + -> \(soft_fail: B/SoftFail) -> Command.build Command.Config::{ commands = [ From d23188dbc5ad7e9065b2d7a8478452f3f7baff1c Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 15:47:52 +0100 Subject: [PATCH 107/119] fix buildkite imports --- buildkite/src/Jobs/Test/ConnectToBerkeley.dhall | 1 + buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall | 1 + 2 files changed, 2 insertions(+) diff --git a/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall b/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall index 968bbe2dd36c..4b2064362344 100644 --- a/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall +++ b/buildkite/src/Jobs/Test/ConnectToBerkeley.dhall @@ -1,4 +1,5 @@ let S = ../../Lib/SelectFiles.dhall +let B = ../../External/Buildkite.dhall let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type let JobSpec = ../../Pipeline/JobSpec.dhall let Pipeline = ../../Pipeline/Dsl.dhall diff --git 
a/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall b/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall index 27eb31435155..0d51072d781c 100644 --- a/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall +++ b/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall @@ -1,4 +1,5 @@ let S = ../../Lib/SelectFiles.dhall +let B = ../External/Buildkite.dhall let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type let JobSpec = ../../Pipeline/JobSpec.dhall From bd9002dd9d31e1caa83304e8e57cb5a373137738 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 17:13:48 +0100 Subject: [PATCH 108/119] fix import #2 --- buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall b/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall index 0d51072d781c..22c6142a8532 100644 --- a/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall +++ b/buildkite/src/Jobs/Test/ConnectToTestworld-2-0.dhall @@ -1,5 +1,5 @@ let S = ../../Lib/SelectFiles.dhall -let B = ../External/Buildkite.dhall +let B = ../../External/Buildkite.dhall let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type let JobSpec = ../../Pipeline/JobSpec.dhall From 8213812dbc80351add66e9400e52be8b63014d80 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 17:47:50 +0100 Subject: [PATCH 109/119] fix perms --- buildkite/scripts/connect-to-testnet.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 buildkite/scripts/connect-to-testnet.sh diff --git a/buildkite/scripts/connect-to-testnet.sh b/buildkite/scripts/connect-to-testnet.sh old mode 100644 new mode 100755 From 977fb717cf70636b66427ca84fb5222ebf01901f Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 29 Nov 2023 18:17:33 +0100 Subject: [PATCH 110/119] decrease required parameters --- buildkite/scripts/connect-to-testnet.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/buildkite/scripts/connect-to-testnet.sh b/buildkite/scripts/connect-to-testnet.sh index ddeacde32e52..035b134a4c59 100755 --- a/buildkite/scripts/connect-to-testnet.sh +++ b/buildkite/scripts/connect-to-testnet.sh @@ -2,7 +2,7 @@ set -eo pipefail -if [[ $# -ne 4 ]]; then +if [[ $# -ne 3 ]]; then echo "Usage: $0 '' ''''" exit 1 fi From e2b4ff6a1752cbf66fb121764c0461c044a1e95b Mon Sep 17 00:00:00 2001 From: Nathan Holland Date: Wed, 6 Dec 2023 17:28:50 -0600 Subject: [PATCH 111/119] Refactor ledger mask update_maps; fix mutation bug in set_parent --- src/lib/merkle_mask/maskable_merkle_tree.ml | 4 +- src/lib/merkle_mask/masking_merkle_tree.ml | 121 +++++++++++--------- 2 files changed, 69 insertions(+), 56 deletions(-) diff --git a/src/lib/merkle_mask/maskable_merkle_tree.ml b/src/lib/merkle_mask/maskable_merkle_tree.ml index 03cd755ee93c..f057a9f65879 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree.ml +++ b/src/lib/merkle_mask/maskable_merkle_tree.ml @@ -234,7 +234,7 @@ module Make (Inputs : Inputs_intf) = struct if not (Mask.Attached.is_committing mask) then ( Mask.Attached.parent_set_notify mask account ; let child_uuid = Mask.Attached.get_uuid mask in -Mask.Attached.drop_accumulated mask; + Mask.Attached.drop_accumulated mask ; iter_descendants child_uuid ~f:Mask.Attached.drop_accumulated ; [%log error] "Update of an account in parent %s conflicted with an account \ @@ -268,7 +268,7 @@ Mask.Attached.drop_accumulated mask; List.iter masks ~f:(fun mask -> if not (Mask.Attached.is_committing mask) then ( let child_uuid = Mask.Attached.get_uuid mask in -Mask.Attached.drop_accumulated mask; + Mask.Attached.drop_accumulated mask ; iter_descendants child_uuid ~f:Mask.Attached.drop_accumulated ; [%log error] "Update of an account in parent %s conflicted with an account \ diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index eadc747687d8..d147c85964bf 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ 
b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -39,25 +39,22 @@ module Make (Inputs : Inputs_intf.S) = struct end type maps_t = - { mutable accounts : Account.t Location_binable.Map.t - ; mutable token_owners : Account_id.t Token_id.Map.t - ; mutable hashes : Hash.t Addr.Map.t - ; mutable locations : Location.t Account_id.Map.t + { accounts : Account.t Location_binable.Map.t + ; token_owners : Account_id.t Token_id.Map.t + ; hashes : Hash.t Addr.Map.t + ; locations : Location.t Account_id.Map.t } [@@deriving sexp] - let maps_copy { accounts; token_owners; hashes; locations } = - { accounts; token_owners; hashes; locations } - (** Merges second maps object into the first one, potentially overwriting some keys *) let maps_merge base { accounts; token_owners; hashes; locations } = let combine ~key:_ _ v = v in - base.accounts <- Map.merge_skewed ~combine base.accounts accounts ; - base.token_owners <- - Map.merge_skewed ~combine base.token_owners token_owners ; - base.hashes <- Map.merge_skewed ~combine base.hashes hashes ; - base.locations <- Map.merge_skewed ~combine base.locations locations + { accounts = Map.merge_skewed ~combine base.accounts accounts + ; token_owners = Map.merge_skewed ~combine base.token_owners token_owners + ; hashes = Map.merge_skewed ~combine base.hashes hashes + ; locations = Map.merge_skewed ~combine base.locations locations + } (** Structure managing cache accumulated since the "base" ledger. @@ -79,9 +76,9 @@ module Make (Inputs : Inputs_intf.S) = struct [current] to [next] and [next] to [t.maps] when the mask at which accumulation of [next] started became detached. 
*) type accumulated_t = - { current : maps_t + { mutable current : maps_t (** Currently used cache: contains a superset of contents of masks from base ledger to the current mask *) - ; next : maps_t + ; mutable next : maps_t (** Cache that will be used after the current cache is garbage-collected *) ; base : Base.t (** Base ledger *) ; detached_next_signal : Detached_parent_signal.t @@ -97,7 +94,7 @@ module Make (Inputs : Inputs_intf.S) = struct ; detached_parent_signal : Detached_parent_signal.t ; mutable current_location : Location.t option ; depth : int - ; maps : maps_t + ; mutable maps : maps_t (* If present, contains maps containing changes both for this mask and for a few ancestors. This is used as a lookup cache. *) @@ -200,7 +197,7 @@ module Make (Inputs : Inputs_intf.S) = struct } ) (** When [accumulated] is not configured, returns current [t.maps] and parent. - + Otherwise, returns the [current] accumulator and [base]. *) let maps_and_ancestor t = actualize_accumulated t ; @@ -215,15 +212,11 @@ module Make (Inputs : Inputs_intf.S) = struct actualize_accumulated t ; match (t.accumulated, t.parent) with | Some { base; detached_next_signal; next; current }, _ -> - { base - ; detached_next_signal - ; next = maps_copy next - ; current = maps_copy current - } + { base; detached_next_signal; next; current } | None, Ok base -> { base - ; next = maps_copy t.maps - ; current = maps_copy t.maps + ; next = t.maps + ; current = t.maps ; detached_next_signal = t.detached_parent_signal } | None, Error loc -> @@ -238,13 +231,14 @@ module Make (Inputs : Inputs_intf.S) = struct let depth t = assert_is_attached t ; t.depth let update_maps ~f t = - f t.maps ; - Option.iter t.accumulated ~f:(fun { current; next; _ } -> - f current ; f next ) + t.maps <- f t.maps ; + Option.iter t.accumulated ~f:(fun acc -> + acc.current <- f acc.current ; + acc.next <- f acc.next ) let self_set_hash t address hash = update_maps t ~f:(fun maps -> - maps.hashes <- Map.set maps.hashes 
~key:address ~data:hash ) + { maps with hashes = Map.set maps.hashes ~key:address ~data:hash } ) let set_inner_hash_at_addr_exn t address hash = assert_is_attached t ; @@ -253,8 +247,9 @@ module Make (Inputs : Inputs_intf.S) = struct let self_set_location t account_id location = update_maps t ~f:(fun maps -> - maps.locations <- - Map.set maps.locations ~key:account_id ~data:location ) ; + { maps with + locations = Map.set maps.locations ~key:account_id ~data:location + } ) ; (* if account is at a hitherto-unused location, that becomes the current location *) @@ -267,13 +262,17 @@ module Make (Inputs : Inputs_intf.S) = struct let self_set_account t location account = update_maps t ~f:(fun maps -> - maps.accounts <- Map.set maps.accounts ~key:location ~data:account ) ; + { maps with + accounts = Map.set maps.accounts ~key:location ~data:account + } ) ; self_set_location t (Account.identifier account) location let self_set_token_owner t token_id account_id = update_maps t ~f:(fun maps -> - maps.token_owners <- - Map.set maps.token_owners ~key:token_id ~data:account_id ) + { maps with + token_owners = + Map.set maps.token_owners ~key:token_id ~data:account_id + } ) (* a read does a lookup in the account_tbl; if that fails, delegate to parent *) @@ -529,15 +528,19 @@ module Make (Inputs : Inputs_intf.S) = struct let remove_account_and_update_hashes t location = (* remove account and key from tables *) let account = Option.value_exn (Map.find t.maps.accounts location) in - t.maps.accounts <- Map.remove t.maps.accounts location ; - (* Update token info. 
*) let account_id = Account.identifier account in - t.maps.token_owners <- - Token_id.Map.remove t.maps.token_owners - (Account_id.derive_token_id ~owner:account_id) ; - (* TODO : use stack database to save unused location, which can be used - when allocating a location *) - t.maps.locations <- Map.remove t.maps.locations account_id ; + t.maps <- + { t.maps with + (* remove account and key from tables *) + accounts = + Map.remove t.maps.accounts location (* update token info. *) + ; token_owners = + Token_id.Map.remove t.maps.token_owners + (Account_id.derive_token_id ~owner:account_id) + (* TODO : use stack database to save unused location, which can be used + when allocating a location *) + ; locations = Map.remove t.maps.locations account_id + } ; (* reuse location if possible *) Option.iter t.current_location ~f:(fun curr_loc -> if Location.equal location curr_loc then @@ -656,10 +659,12 @@ module Make (Inputs : Inputs_intf.S) = struct let parent = get_parent t in let old_root_hash = merkle_root t in let account_data = Map.to_alist t.maps.accounts in - t.maps.accounts <- Location_binable.Map.empty ; - t.maps.hashes <- Addr.Map.empty ; - t.maps.locations <- Account_id.Map.empty ; - t.maps.token_owners <- Token_id.Map.empty ; + t.maps <- + { t.maps with + accounts = Location_binable.Map.empty + ; hashes = Addr.Map.empty + ; token_owners = Token_id.Map.empty + } ; Base.set_batch parent account_data ; Debug_assert.debug_assert (fun () -> [%test_result: Hash.t] @@ -679,12 +684,13 @@ module Make (Inputs : Inputs_intf.S) = struct ; detached_parent_signal = Async.Ivar.create () ; current_location = t.current_location ; depth = t.depth - ; maps = maps_copy t.maps + ; maps = t.maps ; accumulated = Option.map t.accumulated ~f:(fun acc -> - { acc with - next = maps_copy acc.next - ; current = maps_copy acc.current + { base = acc.base + ; detached_next_signal = acc.detached_next_signal + ; next = acc.next + ; current = acc.current } ) ; is_committing = false } @@ -887,9 
+893,12 @@ module Make (Inputs : Inputs_intf.S) = struct as sometimes this is desired behavior *) let close t = assert_is_attached t ; - t.maps.accounts <- Location_binable.Map.empty ; - t.maps.hashes <- Addr.Map.empty ; - t.maps.locations <- Account_id.Map.empty ; + t.maps <- + { t.maps with + accounts = Location_binable.Map.empty + ; hashes = Addr.Map.empty + ; locations = Account_id.Map.empty + } ; Async.Ivar.fill_if_empty t.detached_parent_signal () let index_of_account_exn t key = @@ -1047,9 +1056,13 @@ module Make (Inputs : Inputs_intf.S) = struct ( match accumulated_opt with | Some { current; next; base; detached_next_signal } when Option.is_none t.accumulated -> - maps_merge current t.maps ; - maps_merge next t.maps ; - t.accumulated <- Some { current; next; base; detached_next_signal } + t.accumulated <- + Some + { current = maps_merge current t.maps + ; next = maps_merge next t.maps + ; base + ; detached_next_signal + } | _ -> () ) ; t From 4f8bb8e51f1783b07d143a2e6a8af96e41f36f5d Mon Sep 17 00:00:00 2001 From: georgeee Date: Thu, 7 Dec 2023 17:38:16 +0100 Subject: [PATCH 112/119] fixup! 
Refactor ledger mask update_maps; fix mutation bug in set_parent --- src/lib/merkle_mask/masking_merkle_tree.ml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index d147c85964bf..ab56cf72e51f 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -660,10 +660,10 @@ module Make (Inputs : Inputs_intf.S) = struct let old_root_hash = merkle_root t in let account_data = Map.to_alist t.maps.accounts in t.maps <- - { t.maps with - accounts = Location_binable.Map.empty + { accounts = Location_binable.Map.empty ; hashes = Addr.Map.empty ; token_owners = Token_id.Map.empty + ; locations = Account_id.Map.empty } ; Base.set_batch parent account_data ; Debug_assert.debug_assert (fun () -> From 4a0fff9a2966d14b7a1c0ec00785cea9096b5341 Mon Sep 17 00:00:00 2001 From: dkijania Date: Tue, 12 Dec 2023 16:28:49 +0100 Subject: [PATCH 113/119] Empty commit to signpost Berkeley 2.0.0rampup7 From 4bfb01d718abc1fab2370071426c58a8947f44f5 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 13 Dec 2023 21:46:15 +0100 Subject: [PATCH 114/119] create mina-receipt-chain-hash-fix --- buildkite/scripts/build-artifact.sh | 2 + .../Release/ReceiptChainHashFixArtifact.dhall | 42 ++++++++++++++++++ .../Dockerfile-mina-receipt-chain-hash-fix | 44 +++++++++++++++++++ scripts/migrate-itn-data.sh | 10 +++-- scripts/rebuild-deb.sh | 14 ++++++ 5 files changed, 109 insertions(+), 3 deletions(-) create mode 100644 buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall create mode 100644 dockerfiles/Dockerfile-mina-receipt-chain-hash-fix diff --git a/buildkite/scripts/build-artifact.sh b/buildkite/scripts/build-artifact.sh index adb98a79ff8a..a8a91383dbb5 100755 --- a/buildkite/scripts/build-artifact.sh +++ b/buildkite/scripts/build-artifact.sh @@ -28,6 +28,8 @@ dune build "--profile=${DUNE_PROFILE}" \ src/app/replayer/replayer.exe \ 
src/app/extract_blocks/extract_blocks.exe \ src/app/archive_blocks/archive_blocks.exe \ + src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.exe \ + src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe \ src/app/batch_txn_tool/batch_txn_tool.exe \ src/app/missing_blocks_auditor/missing_blocks_auditor.exe \ src/app/swap_bad_balances/swap_bad_balances.exe \ diff --git a/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall b/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall new file mode 100644 index 000000000000..22ea3f16230a --- /dev/null +++ b/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall @@ -0,0 +1,42 @@ +let Prelude = ../../External/Prelude.dhall + +let Cmd = ../../Lib/Cmds.dhall +let S = ../../Lib/SelectFiles.dhall +let D = S.PathPattern + +let Pipeline = ../../Pipeline/Dsl.dhall +let JobSpec = ../../Pipeline/JobSpec.dhall + +let Command = ../../Command/Base.dhall +let Size = ../../Command/Size.dhall +let DockerImage = ../../Command/DockerImage.dhall + +let spec = DockerImage.ReleaseSpec::{ + service="receipt-chain-hash-fix", + step_key="receipt-chain-hash-fix-docker-image", + network="berkeley", + deps = [ { name = "MinaArtifactBullseye", key = "daemon-berkeley-bullseye-docker-image" } ] +} + +in + +Pipeline.build + Pipeline.Config::{ + spec = + JobSpec::{ + dirtyWhen = [ + S.strictlyStart (S.contains "buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact"), + S.strictlyStart (S.contains "src/app/receipt_chain_hash_to_b58"), + S.strictlyStart (S.contains "src/app/last_vrf_output_to_b64"), + ], + path = "Release", + name = "ReceiptChainHashFixArtifact" + }, + steps = [ + DockerImage.generateStep spec + ] + } + + + + diff --git a/dockerfiles/Dockerfile-mina-receipt-chain-hash-fix b/dockerfiles/Dockerfile-mina-receipt-chain-hash-fix new file mode 100644 index 000000000000..c32fe0ee1d88 --- /dev/null +++ b/dockerfiles/Dockerfile-mina-receipt-chain-hash-fix @@ -0,0 +1,44 @@ +ARG image=debian:bullseye +FROM ${image} + 
+# Run with `docker build --build-arg deb_version=` +ARG deb_version +ARG deb_codename=bullseye +ARG deb_release=unstable + +ENV DEBIAN_FRONTEND noninteractive +RUN echo "Building image with version $deb_codename $deb_release $deb_version" + +COPY scripts/archive-entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +# Dependencies +RUN apt-get update --quiet --yes \ + && apt-get upgrade --quiet --yes \ + && apt-get install --quiet --yes --no-install-recommends \ + procps \ + curl \ + jq \ + dumb-init \ + libssl1.1 \ + libgomp1 \ + libpq-dev \ + apt-transport-https \ + ca-certificates \ + dnsutils \ + tzdata \ + postgresql \ + postgresql-contrib \ + apt-utils \ + man \ + && rm -rf /var/lib/apt/lists/* + +# archive-node package +RUN echo "deb [trusted=yes] http://packages.o1test.net $deb_codename $deb_release" > /etc/apt/sources.list.d/o1.list \ + && apt-get update --quiet --yes \ + && apt-get install --quiet --yes "mina-receipt-chain-hash-fix=$deb_version" \ + && rm -rf /var/lib/apt/lists/* + +ENV RECEIPT_CHAIN_HASH_TO_B58_APP /usr/local/bin/mina-receipt-chain-hash-to-b58 +ENV LAST_VRF_OUTPUT_TO_B64_APP /usr/local/bin/mina-last-vrf-output-to-b64 +ENTRYPOINT ["/etc/mina/receipt-chain-hash-fix-script/migrate-itn-data.sh"] \ No newline at end of file diff --git a/scripts/migrate-itn-data.sh b/scripts/migrate-itn-data.sh index 41f1e4b0a598..3aa1ca2ba801 100755 --- a/scripts/migrate-itn-data.sh +++ b/scripts/migrate-itn-data.sh @@ -1,14 +1,18 @@ #!/bin/bash -if [ $# -lt 1 ] || [ $# -gt 3 ]; then +if [ $# -lt 1 ] || [ $# -gt 5 ]; then echo "Usage" $0 archive-db [data_file] [update_script] echo "'data_file' and 'update_script' are created when running this script" + echo "[env] RECEIPT_CHAIN_HASH_TO_B58_APP overrides receipt_chain_hash_to_b58 location" + echo "[env] LAST_VRF_OUTPUT_TO_B64_APP overrides last_vrf_output_to_b64 location" exit 0 fi ARCHIVE_DB=$1 DATA_FILE=${2:-data_file.tmp} UPDATE_SCRIPT=${3:-data_update.sql} 
+RECEIPT_CHAIN_HASH_TO_B58_APP=${RECEIPT_CHAIN_HASH_TO_B58_APP:-_build/default/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe} +LAST_VRF_OUTPUT_TO_B64_APP=${LAST_VRF_OUTPUT_TO_B64_APP:-_build/default/src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.exe} echo "Migrating receipt chain hashes in account preconditions in archive db '"$ARCHIVE_DB"'" @@ -24,7 +28,7 @@ for line in `cat $DATA_FILE` do ( ID=$(echo $line | awk -F , '{print $1}'); FP=$(echo $line | awk -F , '{print $2}'); - B58=$(echo $FP | _build/default/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe); + B58=$(echo $FP | $RECEIPT_CHAIN_HASH_TO_B58_APP ); echo -n . echo $ID "'"$B58"'" | awk '{print "UPDATE zkapp_account_precondition SET receipt_chain_hash=" $2 " WHERE id=" $1 ";"}' >> $UPDATE_SCRIPT) done @@ -43,7 +47,7 @@ for line in `cat $DATA_FILE` do ( ID=$(echo $line | awk -F , '{print $1}'); FP=$(echo $line | awk -F , '{print $2}'); - B64=$(echo $FP | _build/default/src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.exe); + B64=$(echo $FP | $LAST_VRF_OUTPUT_TO_B64_APP); echo -n . echo $ID "'"$B64"'" | awk '{print "UPDATE blocks SET last_vrf_output=" $2 " WHERE id=" $1 ";"}' >> $UPDATE_SCRIPT) done diff --git a/scripts/rebuild-deb.sh b/scripts/rebuild-deb.sh index 770e1dcbf604..9e9deba4a6d1 100755 --- a/scripts/rebuild-deb.sh +++ b/scripts/rebuild-deb.sh @@ -197,6 +197,20 @@ build_deb mina-logproc ##################################### END LOGPROC PACKAGE ####################################### +##################################### GENERATE RECEIPT CHAIN HASH FIX PACKAGE ####################################### + +create_control_file mina-receipt-chain-hash-fix "${SHARED_DEPS}" 'Tool to run automated fix against a archive database for receipt chain hash.' 
+ +mkdir -p "${BUILDDIR}/etc/mina/receipt-chain-hash-fix-script" + +# Binaries +cp ../src/scripts/migrate-itn-data.sh "${BUILDDIR}/etc/mina/receipt-chain-hash-fix-script/migrate-itn-data.sh" +cp ./default/src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.exe "${BUILDDIR}/usr/local/bin/mina-last-vrf-output-to-b64" +cp ./default/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe "${BUILDDIR}/usr/local/bin/mina-receipt-chain-hash-to-b58" + +build_deb mina-receipt-chain-hash-fix + +##################################### END RECEIPT CHAIN HASH FIX PACKAGE ####################################### ##################################### GENERATE TEST_EXECUTIVE PACKAGE ####################################### From 1e3a60cf7b3179f0c976fca3f41a44ed24009c79 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 13 Dec 2023 21:54:11 +0100 Subject: [PATCH 115/119] remove comma --- buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall b/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall index 22ea3f16230a..f7bcdfd24c05 100644 --- a/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall +++ b/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall @@ -27,7 +27,7 @@ Pipeline.build dirtyWhen = [ S.strictlyStart (S.contains "buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact"), S.strictlyStart (S.contains "src/app/receipt_chain_hash_to_b58"), - S.strictlyStart (S.contains "src/app/last_vrf_output_to_b64"), + S.strictlyStart (S.contains "src/app/last_vrf_output_to_b64") ], path = "Release", name = "ReceiptChainHashFixArtifact" From e95609a75044051bf5e98f5b53e5b1130d671ad0 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 13 Dec 2023 22:10:55 +0100 Subject: [PATCH 116/119] fix script location --- scripts/rebuild-deb.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/rebuild-deb.sh 
b/scripts/rebuild-deb.sh index 9e9deba4a6d1..5afb9b282e8d 100755 --- a/scripts/rebuild-deb.sh +++ b/scripts/rebuild-deb.sh @@ -204,7 +204,7 @@ create_control_file mina-receipt-chain-hash-fix "${SHARED_DEPS}" 'Tool to run au mkdir -p "${BUILDDIR}/etc/mina/receipt-chain-hash-fix-script" # Binaries -cp ../src/scripts/migrate-itn-data.sh "${BUILDDIR}/etc/mina/receipt-chain-hash-fix-script/migrate-itn-data.sh" +cp ../scripts/migrate-itn-data.sh "${BUILDDIR}/etc/mina/receipt-chain-hash-fix-script/migrate-itn-data.sh" cp ./default/src/app/last_vrf_output_to_b64/last_vrf_output_to_b64.exe "${BUILDDIR}/usr/local/bin/mina-last-vrf-output-to-b64" cp ./default/src/app/receipt_chain_hash_to_b58/receipt_chain_hash_to_b58.exe "${BUILDDIR}/usr/local/bin/mina-receipt-chain-hash-to-b58" From b61a2aa150becee1e8b29363a82838c08b826848 Mon Sep 17 00:00:00 2001 From: dkijania Date: Wed, 13 Dec 2023 23:05:46 +0100 Subject: [PATCH 117/119] add docker setup --- buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall | 2 +- scripts/release-docker.sh | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall b/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall index f7bcdfd24c05..dfe53a835b40 100644 --- a/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall +++ b/buildkite/src/Jobs/Release/ReceiptChainHashFixArtifact.dhall @@ -12,7 +12,7 @@ let Size = ../../Command/Size.dhall let DockerImage = ../../Command/DockerImage.dhall let spec = DockerImage.ReleaseSpec::{ - service="receipt-chain-hash-fix", + service="mina-receipt-chain-hash-fix", step_key="receipt-chain-hash-fix-docker-image", network="berkeley", deps = [ { name = "MinaArtifactBullseye", key = "daemon-berkeley-bullseye-docker-image" } ] diff --git a/scripts/release-docker.sh b/scripts/release-docker.sh index 4587129a8a5a..37135ef7d7bc 100755 --- a/scripts/release-docker.sh +++ b/scripts/release-docker.sh @@ -10,7 +10,7 @@ set +x 
CLEAR='\033[0m' RED='\033[0;31m' # Array of valid service names -VALID_SERVICES=('mina-archive', 'mina-daemon' 'mina-rosetta' 'mina-test-executive' 'mina-batch-txn' 'mina-zkapp-test-transaction' 'mina-toolchain' 'bot' 'leaderboard' 'delegation-backend' 'delegation-backend-toolchain' 'itn-orchestrator') +VALID_SERVICES=('mina-archive', 'mina-daemon' 'mina-rosetta' 'mina-test-executive' 'mina-receipt-chain-hash-fix' 'mina-batch-txn' 'mina-zkapp-test-transaction' 'mina-toolchain' 'bot' 'leaderboard' 'delegation-backend' 'delegation-backend-toolchain' 'itn-orchestrator') function usage() { if [[ -n "$1" ]]; then @@ -99,6 +99,9 @@ mina-batch-txn) mina-rosetta) DOCKERFILE_PATH="dockerfiles/stages/1-build-deps dockerfiles/stages/2-opam-deps dockerfiles/stages/3-builder dockerfiles/stages/4-production" ;; +mina-receipt-chain-hash-fix) + DOCKERFILE_PATH="dockerfiles/Dockerfile-mina-receipt-chain-hash-fix" + ;; mina-zkapp-test-transaction) DOCKERFILE_PATH="dockerfiles/Dockerfile-zkapp-test-transaction" ;; From b8220d844b305a99ae4434f3669227a4d29a6355 Mon Sep 17 00:00:00 2001 From: dkijania Date: Thu, 14 Dec 2023 11:47:14 +0100 Subject: [PATCH 118/119] set proper context for chain hash fix docker --- scripts/release-docker.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/release-docker.sh b/scripts/release-docker.sh index 37135ef7d7bc..7e65c185470c 100755 --- a/scripts/release-docker.sh +++ b/scripts/release-docker.sh @@ -101,6 +101,7 @@ mina-rosetta) ;; mina-receipt-chain-hash-fix) DOCKERFILE_PATH="dockerfiles/Dockerfile-mina-receipt-chain-hash-fix" + DOCKER_CONTEXT="dockerfiles/" ;; mina-zkapp-test-transaction) DOCKERFILE_PATH="dockerfiles/Dockerfile-zkapp-test-transaction" From d5d884eac63e9e050ff1d1ab926383a658fbb477 Mon Sep 17 00:00:00 2001 From: dkijania Date: Thu, 14 Dec 2023 13:51:39 +0100 Subject: [PATCH 119/119] fix dependencies for fix package --- scripts/rebuild-deb.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/scripts/rebuild-deb.sh b/scripts/rebuild-deb.sh index 5afb9b282e8d..29a940d4192e 100755 --- a/scripts/rebuild-deb.sh +++ b/scripts/rebuild-deb.sh @@ -199,7 +199,7 @@ build_deb mina-logproc ##################################### GENERATE RECEIPT CHAIN HASH FIX PACKAGE ####################################### -create_control_file mina-receipt-chain-hash-fix "${SHARED_DEPS}" 'Tool to run automated fix against a archive database for receipt chain hash.' +create_control_file mina-receipt-chain-hash-fix "${SHARED_DEPS}${DAEMON_DEPS}" 'Tool to run automated fix against a archive database for receipt chain hash.' mkdir -p "${BUILDDIR}/etc/mina/receipt-chain-hash-fix-script"