diff --git a/src/app/cli/src/init/client.ml b/src/app/cli/src/init/client.ml index 5a53f6ba434..e1af49139ba 100644 --- a/src/app/cli/src/init/client.ml +++ b/src/app/cli/src/init/client.ml @@ -2268,6 +2268,55 @@ let signature_kind = in Core.print_endline signature_kind_string ) +let test_ledger_application = + Command.async ~summary:"Test ledger application" + (let%map_open.Command privkey_path = Cli_lib.Flag.privkey_read_path + and prev_block_path = + flag "--prev-block-path" ~doc:"FILE file with serialized block" + (optional string) + and ledger_path = + flag "--ledger-path" ~doc:"FILE directory with ledger DB" + (required string) + and num_txs = + flag "--num-txs" + ~doc:"NN Number of transactions to create after preparatory rounds" + (required int) + and num_txs_per_round = + flag "--num-txs-per-round" + ~doc: + "NN Number of transactions to create per preparatory round \ + (default: 3)" + (optional int) + and rounds = + flag "--rounds" ~doc:"NN Number of preparatory rounds (default: 580)" + (optional int) + and first_partition_slots = + flag "--first-partition-slots" + ~doc: + "NN Number of slots in first partition of scan state (default: 128)" + (optional int) + and max_depth = + flag "--max-depth" ~doc:"NN Maximum depth of masks (default: 290)" + (optional int) + and no_new_stack = + flag "--old-stack" ~doc:"Use is_new_stack: false (scan state)" no_arg + and has_second_partition = + flag "--has-second-partition" + ~doc:"Assume there is a second partition (scan state)" no_arg + and tracing = flag "--tracing" ~doc:"Wrap test into tracing" no_arg + and no_masks = flag "--no-masks" ~doc:"Do not create masks" no_arg in + Cli_lib.Exceptions.handle_nicely + @@ fun () -> + let first_partition_slots = + Option.value ~default:128 first_partition_slots + in + let num_txs_per_round = Option.value ~default:3 num_txs_per_round in + let rounds = Option.value ~default:580 rounds in + let max_depth = Option.value ~default:290 max_depth in + Test_ledger_application.test 
~privkey_path ~ledger_path ?prev_block_path + ~first_partition_slots ~no_new_stack ~has_second_partition + ~num_txs_per_round ~rounds ~no_masks ~max_depth ~tracing num_txs ) + let itn_create_accounts = Command.async ~summary:"Fund new accounts for incentivized testnet" (let open Command.Param in @@ -2433,6 +2482,7 @@ let ledger = [ ("export", export_ledger) ; ("hash", hash_ledger) ; ("currency", currency_in_ledger) + ; ("test-apply", test_ledger_application) ] let libp2p = diff --git a/src/app/cli/src/init/dune b/src/app/cli/src/init/dune index cf38bfd4e5a..433a49e93ac 100644 --- a/src/app/cli/src/init/dune +++ b/src/app/cli/src/init/dune @@ -35,6 +35,7 @@ cohttp-async graphql-async mirage-crypto-ec + base_quickcheck ;;local libraries bounded_types snark_profiler_lib @@ -122,6 +123,7 @@ string_sign zkapp_command_builder internal_tracing + transaction_snark_scan_state ) (instrumentation (backend bisect_ppx)) (preprocessor_deps ../../../../../graphql_schema.json diff --git a/src/app/cli/src/init/test_ledger_application.ml b/src/app/cli/src/init/test_ledger_application.ml new file mode 100644 index 00000000000..ccf2da60be9 --- /dev/null +++ b/src/app/cli/src/init/test_ledger_application.ml @@ -0,0 +1,219 @@ +(* test_ledger_application.ml -- code to test application of transactions to a specific ledger *) + +open Core_kernel +open Async_kernel +open Mina_ledger +open Mina_base +open Mina_state + +let logger = Logger.create () + +let read_privkey privkey_path = + let password = + lazy (Secrets.Keypair.Terminal_stdin.prompt_password "Enter password: ") + in + match%map Secrets.Keypair.read ~privkey_path ~password with + | Ok keypair -> + keypair + | Error err -> + eprintf "Could not read the specified keypair: %s\n" + (Secrets.Privkey_error.to_string err) ; + exit 1 + +let mk_tx ~(constraint_constants : Genesis_constants.Constraint_constants.t) + keypair nonce = + let num_acc_updates = 8 in + let multispec : Transaction_snark.For_tests.Multiple_transfers_spec.t = + let 
fee_payer = None in + let receivers = + Quickcheck.random_value + ~seed:(`Deterministic ("test-apply-" ^ Unsigned.UInt32.to_string nonce)) + @@ Base_quickcheck.Generator.list_with_length ~length:num_acc_updates + @@ let%map.Base_quickcheck.Generator kp = Signature_lib.Keypair.gen in + (Signature_lib.Public_key.compress kp.public_key, Currency.Amount.zero) + in + let zkapp_account_keypairs = [] in + let new_zkapp_account = false in + let snapp_update = Account_update.Update.dummy in + let actions = [] in + let events = [] in + let call_data = Snark_params.Tick.Field.zero in + let preconditions = Some Account_update.Preconditions.accept in + { fee = Currency.Fee.of_mina_int_exn 1 + ; sender = (keypair, nonce) + ; fee_payer + ; receivers + ; amount = + Currency.Amount.( + scale + (of_fee constraint_constants.account_creation_fee) + num_acc_updates) + |> Option.value_exn ~here:[%here] + ; zkapp_account_keypairs + ; memo = Signed_command_memo.empty + ; new_zkapp_account + ; snapp_update + ; actions + ; events + ; call_data + ; preconditions + } + in + Transaction_snark.For_tests.multiple_transfers ~constraint_constants multispec + +let generate_protocol_state_stub ~consensus_constants ~constraint_constants + ledger = + let open Staged_ledger_diff in + Protocol_state.negative_one + ~genesis_ledger:(lazy ledger) + ~genesis_epoch_data:None ~constraint_constants ~consensus_constants + ~genesis_body_reference + +let apply_txs ~constraint_constants ~first_partition_slots ~no_new_stack + ~has_second_partition ~num_txs ~prev_protocol_state + ~(keypair : Signature_lib.Keypair.t) ~i ledger = + let init_nonce = + let account_id = Account_id.of_public_key keypair.public_key in + let loc = + Ledger.location_of_account ledger account_id + |> Option.value_exn ~here:[%here] + in + let account = Ledger.get ledger loc |> Option.value_exn ~here:[%here] in + account.nonce + in + let to_nonce = + Fn.compose (Unsigned.UInt32.add init_nonce) Unsigned.UInt32.of_int + in + let mk_tx' = mk_tx 
~constraint_constants keypair in + let fork_slot = + Option.value_map ~default:Mina_numbers.Global_slot_since_genesis.zero + ~f:(fun f -> f.global_slot_since_genesis) + constraint_constants.fork + in + let prev_protocol_state_body_hash = + Protocol_state.body prev_protocol_state |> Protocol_state.Body.hash + in + let prev_protocol_state_hash = + (Protocol_state.hashes_with_body ~body_hash:prev_protocol_state_body_hash + prev_protocol_state ) + .state_hash + in + let prev_state_view = + Protocol_state.body prev_protocol_state + |> Mina_state.Protocol_state.Body.view + in + let global_slot = + Protocol_state.consensus_state prev_protocol_state + |> Consensus.Data.Consensus_state.curr_global_slot + |> Mina_numbers.Global_slot_since_hard_fork.succ + |> Mina_numbers.Global_slot_since_hard_fork.to_int + |> Mina_numbers.Global_slot_span.of_int + |> Mina_numbers.Global_slot_since_genesis.add fork_slot + in + let zkapps = List.init num_txs ~f:(Fn.compose mk_tx' to_nonce) in + let pending_coinbase = + Pending_coinbase.create ~depth:constraint_constants.pending_coinbase_depth + () + |> Or_error.ok_exn + in + let zkapps' = + List.map zkapps ~f:(fun tx -> + { With_status.data = + Mina_transaction.Transaction.Command (User_command.Zkapp_command tx) + ; status = Applied + } ) + in + let accounts_accessed = + List.fold_left ~init:Account_id.Set.empty zkapps ~f:(fun set txn -> + Account_id.Set.( + union set (of_list (Zkapp_command.accounts_referenced txn))) ) + |> Set.to_list + in + Ledger.unsafe_preload_accounts_from_parent ledger accounts_accessed ; + let start = Time.now () in + match%map + Staged_ledger.Test_helpers.update_coinbase_stack_and_get_data_impl + ~first_partition_slots ~is_new_stack:(not no_new_stack) + ~no_second_partition:(not has_second_partition) ~constraint_constants + ~logger ~global_slot ledger pending_coinbase zkapps' prev_state_view + (prev_protocol_state_hash, prev_protocol_state_body_hash) + with + | Ok (b, _, _, _, _) -> + let root = Ledger.merkle_root 
ledger in + printf + !"Result of application %d: %B (took %s): new root %s\n%!" + i b + Time.(Span.to_string @@ diff (now ()) start) + (Ledger_hash.to_base58_check root) + | Error e -> + eprintf + !"Error applying staged ledger: %s\n%!" + (Staged_ledger.Staged_ledger_error.to_string e) ; + exit 1 + +let test ~privkey_path ~ledger_path ?prev_block_path ~first_partition_slots + ~no_new_stack ~has_second_partition ~num_txs_per_round ~rounds ~no_masks + ~max_depth ~tracing num_txs_final = + O1trace.thread "mina" + @@ fun () -> + let%bind keypair = read_privkey privkey_path in + let constraint_constants = + Genesis_constants_compiled.Constraint_constants.t + in + let init_ledger = + Ledger.create ~directory_name:ledger_path + ~depth:constraint_constants.ledger_depth () + in + let prev_protocol_state = + let%map.Option prev_block_path = prev_block_path in + let prev_block_data = In_channel.read_all prev_block_path in + let prev_block = + Binable.of_string (module Mina_block.Stable.Latest) prev_block_data + in + Mina_block.header prev_block |> Mina_block.Header.protocol_state + in + let consensus_constants = + Consensus.Constants.create ~constraint_constants + ~protocol_constants:Genesis_constants_compiled.t.protocol + in + let prev_protocol_state = + match prev_protocol_state with + | None -> + generate_protocol_state_stub ~consensus_constants ~constraint_constants + init_ledger + | Some p -> + p + in + let apply = + apply_txs ~constraint_constants ~first_partition_slots ~no_new_stack + ~has_second_partition ~prev_protocol_state ~keypair + in + let mask_handler ledger = + if no_masks then Fn.const ledger + else + Fn.compose (Ledger.register_mask ledger) + @@ Ledger.Mask.create ~depth:constraint_constants.ledger_depth + in + let drop_old_ledger ledger = + if not no_masks then ( + Ledger.commit ledger ; + Ledger.remove_and_reparent_exn ledger ledger ) + in + let stop_tracing = + if tracing then (fun x -> Mina_tracing.stop () ; x) else ident + in + let init_root = 
Ledger.merkle_root init_ledger in + printf !"Init root %s\n%!" (Ledger_hash.to_base58_check init_root) ; + Deferred.List.fold (List.init rounds ~f:ident) ~init:(init_ledger, []) + ~f:(fun (ledger, ledgers) i -> + let%bind () = + if tracing && i = 1 then Mina_tracing.start "." else Deferred.unit + in + List.hd (List.drop ledgers (max_depth - 1)) + |> Option.iter ~f:drop_old_ledger ; + apply ~num_txs:num_txs_per_round ~i ledger + >>| mask_handler ledger + >>| Fn.flip Tuple2.create (ledger :: ledgers) ) + >>| fst + >>= apply ~num_txs:num_txs_final ~i:rounds + >>| stop_tracing diff --git a/src/lib/cli_lib/commands.ml b/src/lib/cli_lib/commands.ml index 3e739f4ba43..78a11490670 100644 --- a/src/lib/cli_lib/commands.ml +++ b/src/lib/cli_lib/commands.ml @@ -8,21 +8,19 @@ open Async let generate_keypair = Command.async ~summary:"Generate a new public, private keypair" - (let open Command.Let_syntax in - let%map_open privkey_path = Flag.privkey_write_path in - Exceptions.handle_nicely - @@ fun () -> - let env = Secrets.Keypair.env in - if Option.is_some (Sys.getenv env) then - eprintf "Using password from environment variable %s\n" env ; - let open Deferred.Let_syntax in - let kp = Keypair.create () in - let%bind () = Secrets.Keypair.Terminal_stdin.write_exn kp ~privkey_path in - printf "Keypair generated\nPublic key: %s\nRaw public key: %s\n" - ( kp.public_key |> Public_key.compress - |> Public_key.Compressed.to_base58_check ) - (Rosetta_coding.Coding.of_public_key kp.public_key) ; - exit 0) + (let%map_open.Command privkey_path = Flag.privkey_write_path in + Exceptions.handle_nicely + @@ fun () -> + let env = Secrets.Keypair.env in + if Option.is_some (Sys.getenv env) then + eprintf "Using password from environment variable %s\n" env ; + let kp = Keypair.create () in + let%bind () = Secrets.Keypair.Terminal_stdin.write_exn kp ~privkey_path in + printf "Keypair generated\nPublic key: %s\nRaw public key: %s\n" + ( kp.public_key |> Public_key.compress + |> 
Public_key.Compressed.to_base58_check ) + (Rosetta_coding.Coding.of_public_key kp.public_key) ; + exit 0 ) let validate_keypair = Command.async ~summary:"Validate a public, private keypair" diff --git a/src/lib/merkle_ledger/any_ledger.ml b/src/lib/merkle_ledger/any_ledger.ml index e74ac391d90..33def1b347a 100644 --- a/src/lib/merkle_ledger/any_ledger.ml +++ b/src/lib/merkle_ledger/any_ledger.ml @@ -87,7 +87,8 @@ module Make_base (Inputs : Intf.Inputs.Intf) : let get_at_index_exn (T ((module Base), t)) = Base.get_at_index_exn t - let set_batch (T ((module Base), t)) = Base.set_batch t + let set_batch ?hash_cache (T ((module Base), t)) = + Base.set_batch ?hash_cache t let set (T ((module Base), t)) = Base.set t diff --git a/src/lib/merkle_ledger/intf.ml b/src/lib/merkle_ledger/intf.ml index 39de10c4c5e..a42e16088ea 100644 --- a/src/lib/merkle_ledger/intf.ml +++ b/src/lib/merkle_ledger/intf.ml @@ -392,7 +392,8 @@ module Ledger = struct val set : t -> Location.t -> account -> unit - val set_batch : t -> (Location.t * account) list -> unit + val set_batch : + ?hash_cache:hash Addr.Map.t -> t -> (Location.t * account) list -> unit val get_at_index_exn : t -> int -> account diff --git a/src/lib/merkle_ledger/null_ledger.ml b/src/lib/merkle_ledger/null_ledger.ml index 157244e8c10..7bb99af3660 100644 --- a/src/lib/merkle_ledger/null_ledger.ml +++ b/src/lib/merkle_ledger/null_ledger.ml @@ -93,7 +93,8 @@ end = struct let get_at_index_exn _t = failwith "get_at_index_exn: null ledgers are empty" - let set_batch _t = failwith "set_batch: null ledgers cannot be mutated" + let set_batch ?hash_cache:_ _t = + failwith "set_batch: null ledgers cannot be mutated" let set _t = failwith "set: null ledgers cannot be mutated" diff --git a/src/lib/merkle_ledger/util.ml b/src/lib/merkle_ledger/util.ml index af51910d7c5..46bcba58e96 100644 --- a/src/lib/merkle_ledger/util.ml +++ b/src/lib/merkle_ledger/util.ml @@ -57,7 +57,10 @@ module Make (Inputs : Inputs_intf) : sig Inputs.Base.t -> 
(Inputs.Location.t * Inputs.Hash.t) list -> unit val set_batch : - Inputs.Base.t -> (Inputs.Location.t * Inputs.Account.t) list -> unit + ?hash_cache:Inputs.Hash.t Inputs.Location.Addr.Map.t + -> Inputs.Base.t + -> (Inputs.Location.t * Inputs.Account.t) list + -> unit val set_batch_accounts : Inputs.Base.t -> (Inputs.Location.Addr.t * Inputs.Account.t) list -> unit @@ -82,7 +85,18 @@ end = struct | addr, Some account -> Some (addr, account) ) - let rec compute_affected_locations_and_hashes t locations_and_hashes acc = + let lookup_hash ?hash_cache ~compute loc = + let res = + let%bind.Option addr = + Option.try_with (fun () -> Inputs.Location.to_path_exn loc) + in + let%bind.Option m = hash_cache in + Map.find m addr + in + match res with Some x -> x | _ -> compute () + + let rec compute_affected_locations_and_hashes ?hash_cache t + locations_and_hashes acc = let ledger_depth = Inputs.ledger_depth t in if not @@ List.is_empty locations_and_hashes then let height = @@ -103,11 +117,13 @@ end = struct the hash now. *) let parent_hash = - let left_hash, right_hash = - Inputs.Location.order_siblings location hash - sibling_hash - in - Inputs.Hash.merge ~height left_hash right_hash + lookup_hash ?hash_cache parent_location + ~compute:(fun () -> + let left_hash, right_hash = + Inputs.Location.order_siblings location hash + sibling_hash + in + Inputs.Hash.merge ~height left_hash right_hash ) in `Hash parent_hash | Some (`Hash _) -> @@ -125,27 +141,32 @@ end = struct (* We haven't recorded the sibling, so query the ledger to get the hash. 
*) - let sibling_location = Inputs.Location.sibling location in - let sibling_hash = Inputs.get_hash t sibling_location in let parent_hash = - let left_hash, right_hash = - Inputs.Location.order_siblings location hash sibling_hash - in - Inputs.Hash.merge ~height left_hash right_hash + lookup_hash ?hash_cache key ~compute:(fun () -> + let sibling_location = + Inputs.Location.sibling location + in + let sibling_hash = Inputs.get_hash t sibling_location in + let left_hash, right_hash = + Inputs.Location.order_siblings location hash + sibling_hash + in + Inputs.Hash.merge ~height left_hash right_hash ) in (key, parent_hash) :: acc | `Hash parent_hash -> (* We have already computed the hash above. *) (key, parent_hash) :: acc ) in - compute_affected_locations_and_hashes t rev_parent_locations_and_hashes + compute_affected_locations_and_hashes ?hash_cache t + rev_parent_locations_and_hashes (List.rev_append rev_parent_locations_and_hashes acc) else acc else acc - let set_hash_batch t locations_and_hashes = + let set_hash_batch ?hash_cache t locations_and_hashes = Inputs.set_raw_hash_batch t - (compute_affected_locations_and_hashes t locations_and_hashes + (compute_affected_locations_and_hashes ?hash_cache t locations_and_hashes locations_and_hashes ) let compute_last_index addresses = @@ -186,13 +207,17 @@ end = struct (* TODO: When we do batch on a database, we should add accounts, locations and hashes simulatenously for full atomicity. 
*) - let set_batch t locations_and_accounts = + let set_batch ?hash_cache t locations_and_accounts = set_raw_addresses t locations_and_accounts ; Inputs.set_raw_account_batch t locations_and_accounts ; - set_hash_batch t + set_hash_batch ?hash_cache t @@ List.map locations_and_accounts ~f:(fun (location, account) -> - ( Inputs.location_of_hash_addr (Inputs.Location.to_path_exn location) - , Inputs.Hash.hash_account account ) ) + let addr = Inputs.Location.to_path_exn location in + let account_hash = + lookup_hash ?hash_cache location ~compute:(fun () -> + Inputs.Hash.hash_account account ) + in + (Inputs.location_of_hash_addr addr, account_hash) ) let set_batch_accounts t addresses_and_accounts = set_batch t @@ -208,4 +233,6 @@ end = struct let num_accounts = List.length accounts in List.(zip_exn (take addresses num_accounts) accounts) |> set_batch_accounts t + + let set_hash_batch = set_hash_batch ?hash_cache:None end diff --git a/src/lib/merkle_ledger/util.mli b/src/lib/merkle_ledger/util.mli index 5473f664eb8..cec681679ee 100644 --- a/src/lib/merkle_ledger/util.mli +++ b/src/lib/merkle_ledger/util.mli @@ -57,7 +57,10 @@ module Make (Inputs : Inputs_intf) : sig Inputs.Base.t -> (Inputs.Location.t * Inputs.Hash.t) list -> unit val set_batch : - Inputs.Base.t -> (Inputs.Location.t * Inputs.Account.t) list -> unit + ?hash_cache:Inputs.Hash.t Inputs.Location.Addr.Map.t + -> Inputs.Base.t + -> (Inputs.Location.t * Inputs.Account.t) list + -> unit val set_batch_accounts : Inputs.Base.t -> (Inputs.Location.Addr.t * Inputs.Account.t) list -> unit diff --git a/src/lib/merkle_ledger_tests/test_mask.ml b/src/lib/merkle_ledger_tests/test_mask.ml index 76c661c0da1..cc224546757 100644 --- a/src/lib/merkle_ledger_tests/test_mask.ml +++ b/src/lib/merkle_ledger_tests/test_mask.ml @@ -65,11 +65,12 @@ module type Test_intf = sig (** Here we provide a base ledger and two layers of attached masks * one ontop another *) val with_chain : - ( Base.t - -> mask:Mask.Attached.t - -> 
mask_as_base:Base.t - -> mask2:Mask.Attached.t - -> 'a ) + ( Base.t (* base ledger *) + -> mask:(Mask.Attached.t * Base.t) lazy_t + -> (* first mask on top of base ledger *) + mask2:Mask.Attached.t lazy_t + -> (* second mask on top of the first mask *) + 'a ) -> 'a end @@ -234,22 +235,20 @@ module Make (Test : Test_intf) = struct let () = add_test "commit at layer2, dumps to layer1, not in base" (fun () -> - Test.with_chain (fun base ~mask:level1 ~mask_as_base:_ ~mask2:level2 -> - Mask.Attached.set level2 dummy_location dummy_account ; + Test.with_chain (fun base ~mask:m1_lazy ~mask2:m2_lazy -> + let m2 = Lazy.force m2_lazy in + let m1, _ = Lazy.force m1_lazy in + Mask.Attached.set m2 dummy_location dummy_account ; (* verify account is in the layer2 mask *) - assert ( - Mask.Attached.For_testing.location_in_mask level2 dummy_location ) ; - Mask.Attached.commit level2 ; + assert (Mask.Attached.For_testing.location_in_mask m2 dummy_location) ; + Mask.Attached.commit m2 ; (* account is no longer in layer2 *) assert ( - not - (Mask.Attached.For_testing.location_in_mask level2 - dummy_location ) ) ; + not (Mask.Attached.For_testing.location_in_mask m2 dummy_location) ) ; (* account is still not in base *) assert (Option.is_none @@ Maskable.get base dummy_location) ; (* account is present in layer1 *) - assert ( - Mask.Attached.For_testing.location_in_mask level1 dummy_location ) ) ) + assert (Mask.Attached.For_testing.location_in_mask m1 dummy_location) ) ) let () = add_test "register and unregister mask" (fun () -> @@ -389,7 +388,8 @@ module Make (Test : Test_intf) = struct "get_all_accounts should preserve the ordering of accounts by location \ with noncontiguous updates of accounts on the mask" (fun () -> (* see similar test in test_database *) - Test.with_chain (fun _ ~mask:mask1 ~mask_as_base:_ ~mask2 -> + Test.with_chain (fun _ ~mask:mask1_lazy ~mask2:mask2_lazy -> + let mask1, _ = Lazy.force mask1_lazy in let num_accounts = 1 lsl Test.depth in let gen_values gen 
list_length = Quickcheck.random_value @@ -413,6 +413,7 @@ module Make (Test : Test_intf) = struct |> List.unzip in let subset_balances = gen_values Balance.gen num_subset in + let mask2 = Lazy.force mask2_lazy in let subset_updated_accounts = List.map2_exn subset_accounts subset_balances ~f:(fun account balance -> @@ -610,7 +611,7 @@ module Make (Test : Test_intf) = struct let () = add_test "mask reparenting works" (fun () -> - Test.with_chain (fun base ~mask:m1 ~mask_as_base ~mask2:m2 -> + Test.with_chain (fun base ~mask:m1_lazy ~mask2:m2_lazy -> let num_accounts = 3 in let account_ids = Account_id.gen_accounts num_accounts in let balances = @@ -623,7 +624,9 @@ module Make (Test : Test_intf) = struct match accounts with | [ a1; a2; a3 ] -> let loc1 = parent_create_new_account_exn base a1 in + let m1, m1_base = Lazy.force m1_lazy in let loc2 = create_new_account_exn m1 a2 in + let m2 = Lazy.force m2_lazy in let loc3 = create_new_account_exn m2 a3 in let locs = [ (loc1, a1); (loc2, a2); (loc3, a3) ] in (* all accounts are here *) @@ -636,7 +639,7 @@ module Make (Test : Test_intf) = struct Mask.Attached.commit m1 ; [%test_result: Account.t option] ~message:"a2 is in base" ~expect:(Some a2) (Test.Base.get base loc2) ; - Maskable.remove_and_reparent_exn mask_as_base m1 ; + Maskable.remove_and_reparent_exn m1_base m1 ; [%test_result: Account.t option] ~message:"a1 is in base" ~expect:(Some a1) (Test.Base.get base loc1) ; [%test_result: Account.t option] ~message:"a2 is in base" @@ -783,14 +786,17 @@ module Make_maskable_and_mask_with_depth (Depth : Depth_S) = struct let with_chain f = with_instances (fun maskable mask -> - let attached1 = Maskable.register_mask maskable mask in - let attached1_as_base = - Any_base.cast (module Mask.Attached) attached1 + let attached1 = + lazy + (let m = Maskable.register_mask maskable mask in + (m, Any_base.cast (module Mask.Attached) m) ) + in + let attached2 = + lazy + ( Maskable.register_mask (snd @@ Lazy.force attached1) + @@ 
Mask.create ~depth:Depth.depth () ) in - let mask2 = Mask.create ~depth:Depth.depth () in - let attached2 = Maskable.register_mask attached1_as_base mask2 in - f maskable ~mask:attached1 ~mask_as_base:attached1_as_base - ~mask2:attached2 ) + f maskable ~mask:attached1 ~mask2:attached2 ) end module Make_maskable_and_mask (Depth : Depth_S) = diff --git a/src/lib/merkle_mask/maskable_merkle_tree.ml b/src/lib/merkle_mask/maskable_merkle_tree.ml index c4cd8a888bf..3b511213aea 100644 --- a/src/lib/merkle_mask/maskable_merkle_tree.ml +++ b/src/lib/merkle_mask/maskable_merkle_tree.ml @@ -244,8 +244,8 @@ module Make (Inputs : Inputs_intf) = struct List.iter accounts ~f:(fun account -> Mask.Attached.parent_set_notify mask account ) ) ) - let set_batch t locations_and_accounts = - Base.set_batch t locations_and_accounts ; + let set_batch ?hash_cache t locations_and_accounts = + Base.set_batch ?hash_cache t locations_and_accounts ; batch_notify_mask_children t (List.map locations_and_accounts ~f:snd) let set_batch_accounts t addresses_and_accounts = diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 1b0e91ac1f3..d696870a791 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -32,17 +32,21 @@ module Make (Inputs : Inputs_intf.S) = struct ; token_owners : Account_id.t Token_id.Map.t ; hashes : Hash.t Addr.Map.t ; locations : Location.t Account_id.Map.t + ; non_existent_accounts : Account_id.Set.t } [@@deriving sexp] (** Merges second maps object into the first one, potentially overwriting some keys *) - let maps_merge base { accounts; token_owners; hashes; locations } = + let maps_merge base + { accounts; token_owners; hashes; locations; non_existent_accounts } = let combine ~key:_ _ v = v in { accounts = Map.merge_skewed ~combine base.accounts accounts ; token_owners = Map.merge_skewed ~combine base.token_owners token_owners ; hashes = Map.merge_skewed ~combine 
base.hashes hashes ; locations = Map.merge_skewed ~combine base.locations locations + ; non_existent_accounts = + Account_id.Set.union base.non_existent_accounts non_existent_accounts } (** Structure managing cache accumulated since the "base" ledger. @@ -94,11 +98,12 @@ module Make (Inputs : Inputs_intf.S) = struct type unattached = t [@@deriving sexp] - let empty_maps () = + let empty_maps = { accounts = Location_binable.Map.empty ; token_owners = Token_id.Map.empty ; hashes = Addr.Map.empty ; locations = Account_id.Map.empty + ; non_existent_accounts = Account_id.Set.empty } let create ~depth () = @@ -108,7 +113,7 @@ module Make (Inputs : Inputs_intf.S) = struct ; current_location = None ; depth ; accumulated = None - ; maps = empty_maps () + ; maps = empty_maps ; is_committing = false } @@ -218,6 +223,8 @@ module Make (Inputs : Inputs_intf.S) = struct update_maps t ~f:(fun maps -> { maps with locations = Map.set maps.locations ~key:account_id ~data:location + ; non_existent_accounts = + Set.remove maps.non_existent_accounts account_id } ) ; (* if account is at a hitherto-unused location, that becomes the current location @@ -274,34 +281,36 @@ module Make (Inputs : Inputs_intf.S) = struct | _ -> None ) in - let from_parent = lookup_parent ancestor not_found in - List.fold_map self_found_or_none ~init:from_parent - ~f:(fun from_parent (id, self_found) -> - match (self_found, from_parent) with - | None, r :: rest -> - (rest, r) - | Some acc_found_locally, _ -> - (from_parent, (id, acc_found_locally)) - | _ -> - failwith "unexpected number of results from DB" ) - |> snd + if List.is_empty not_found then + List.map ~f:(fun (a, x) -> (a, Option.value_exn x)) self_found_or_none + else + let from_parent = lookup_parent ancestor not_found in + List.fold_map self_found_or_none ~init:from_parent + ~f:(fun from_parent (id, self_found) -> + match (self_found, from_parent) with + | None, r :: rest -> + (rest, r) + | Some acc_found_locally, _ -> + (from_parent, (id, 
acc_found_locally)) + | _ -> + failwith "unexpected number of results from DB" ) + |> snd let get_batch t = - let self_find ~maps id = - let res = Map.find maps.accounts id in - let res = - if Option.is_none res then - let is_empty = - Option.value_map ~default:true t.current_location - ~f:(fun current_location -> - let address = Location.to_path_exn id in - let current_address = Location.to_path_exn current_location in - Addr.is_further_right ~than:current_address address ) - in - Option.some_if is_empty None - else Some res - in - (id, res) + let is_empty loc = + Option.value_map ~default:true t.current_location ~f:(fun cur_loc -> + let cur_addr = Location.to_path_exn cur_loc in + Addr.is_further_right ~than:cur_addr @@ Location.to_path_exn loc ) + in + let self_find ~maps:{ accounts; _ } id = + ( id + , match Map.find accounts id with + | None when is_empty id -> + Some None + | None -> + None + | s -> + Some s ) in self_find_or_batch_lookup self_find Base.get_batch t @@ -538,8 +547,9 @@ module Make (Inputs : Inputs_intf.S) = struct (Account_id.derive_token_id ~owner:account_id) account_id - (* a write writes only to the mask, parent is not involved need to update - both account and hash pieces of the mask *) + (* a write writes only to the mask, parent is not involved + + need to update both account and hash pieces of the mask *) let set t location account = assert_is_attached t ; set_account_unsafe t location account ; @@ -621,13 +631,9 @@ module Make (Inputs : Inputs_intf.S) = struct let parent = get_parent t in let old_root_hash = merkle_root t in let account_data = Map.to_alist t.maps.accounts in - t.maps <- - { accounts = Location_binable.Map.empty - ; hashes = Addr.Map.empty - ; token_owners = Token_id.Map.empty - ; locations = Account_id.Map.empty - } ; - Base.set_batch parent account_data ; + let hash_cache = t.maps.hashes in + t.maps <- empty_maps ; + Base.set_batch ~hash_cache parent account_data ; Debug_assert.debug_assert (fun () -> [%test_result: 
Hash.t] ~message: @@ -784,21 +790,23 @@ module Make (Inputs : Inputs_intf.S) = struct failwith "Expected mask current location to represent an account" ) + let self_lookup_account ~maps account_id = + if Set.mem maps.non_existent_accounts account_id then Some None + else Option.map ~f:Option.some @@ Map.find maps.locations account_id + let location_of_account t account_id = assert_is_attached t ; let maps, ancestor = maps_and_ancestor t in - let mask_result = Map.find maps.locations account_id in - match mask_result with - | Some _ -> - mask_result + match self_lookup_account ~maps account_id with + | Some r -> + r | None -> Base.location_of_account ancestor account_id - let location_of_account_batch t = + let location_of_account_batch = self_find_or_batch_lookup - (fun ~maps id -> - (id, Option.map ~f:Option.some @@ Map.find maps.locations id) ) - Base.location_of_account_batch t + (fun ~maps id -> (id, self_lookup_account ~maps id)) + Base.location_of_account_batch (* Adds specified accounts to the mask by laoding them from parent ledger. 
@@ -810,8 +818,29 @@ module Make (Inputs : Inputs_intf.S) = struct let unsafe_preload_accounts_from_parent t account_ids = assert_is_attached t ; let locations = location_of_account_batch t account_ids in - let non_empty_locations = List.filter_map locations ~f:snd in + let non_empty_locations, empty_keys = + List.partition_map locations ~f:(function + | _, Some loc -> + First loc + | key, None -> + Second key ) + in + update_maps t ~f:(fun maps -> + { maps with + non_existent_accounts = + Set.union maps.non_existent_accounts + (Account_id.Set.of_list empty_keys) + } ) ; let accounts = get_batch t non_empty_locations in + let empty_locations = + Option.value_map (last_filled t) ~default:[] ~f:(fun init -> + snd + @@ List.fold_map empty_keys ~init ~f:(fun loc _ -> + Location.next loc + |> Option.value_map ~default:(loc, loc) ~f:(fun loc' -> + (loc', loc') ) ) ) + in + let locations = empty_locations @ non_empty_locations in let all_hash_locations = let rec generate_locations account_locations acc = match account_locations with @@ -832,7 +861,7 @@ module Make (Inputs : Inputs_intf.S) = struct generate_locations account_locations (Location.Hash address :: acc) ) in - generate_locations non_empty_locations [] + generate_locations locations [] in let all_hashes = get_hash_batch_exn t all_hash_locations in (* Batch import merkle paths and self hashes. *) @@ -841,11 +870,7 @@ module Make (Inputs : Inputs_intf.S) = struct self_set_hash t address hash ) ; (* Batch import accounts. 
*) List.iter accounts ~f:(fun (location, account) -> - match account with - | None -> - () - | Some account -> - set_account_unsafe t location account ) + Option.iter account ~f:(set_account_unsafe t location) ) (* not needed for in-memory mask; in the database, it's currently a NOP *) let get_inner_hash_at_addr_exn t address = @@ -857,12 +882,7 @@ module Make (Inputs : Inputs_intf.S) = struct as sometimes this is desired behavior *) let close t = assert_is_attached t ; - t.maps <- - { t.maps with - accounts = Location_binable.Map.empty - ; hashes = Addr.Map.empty - ; locations = Account_id.Map.empty - } ; + t.maps <- empty_maps ; Async.Ivar.fill_if_empty t.detached_parent_signal () let index_of_account_exn t key = @@ -969,31 +989,38 @@ module Make (Inputs : Inputs_intf.S) = struct (* NB: updates the mutable current_location field in t *) let get_or_create_account t account_id account = assert_is_attached t ; - let maps, ancestor = maps_and_ancestor t in - match Map.find maps.locations account_id with + let { locations; non_existent_accounts; _ }, ancestor = + maps_and_ancestor t + in + let add_location () = + (* not in parent, create new location *) + let maybe_location = + match t.current_location with + | None -> + Some (first_location ~ledger_depth:t.depth) + | Some loc -> + Location.next loc + in + match maybe_location with + | None -> + Or_error.error_string "Db_error.Out_of_leaves" + | Some location -> + (* `set` calls `self_set_location`, which updates + the current location + *) + set t location account ; + Ok (`Added, location) + in + match Map.find locations account_id with | None -> ( - (* not in mask, maybe in parent *) - match Base.location_of_account ancestor account_id with - | Some location -> - Ok (`Existed, location) - | None -> ( - (* not in parent, create new location *) - let maybe_location = - match last_filled t with - | None -> - Some (first_location ~ledger_depth:t.depth) - | Some loc -> - Location.next loc - in - match maybe_location with 
- | None -> - Or_error.error_string "Db_error.Out_of_leaves" - | Some location -> - (* `set` calls `self_set_location`, which updates - the current location - *) - set t location account ; - Ok (`Added, location) ) ) + if Set.mem non_existent_accounts account_id then add_location () + else + (* not in mask, maybe in parent *) + match Base.location_of_account ancestor account_id with + | Some location -> + Ok (`Existed, location) + | None -> + add_location () ) | Some location -> Ok (`Existed, location) end diff --git a/src/lib/staged_ledger/staged_ledger.ml b/src/lib/staged_ledger/staged_ledger.ml index 91f6f6cf4db..af202350850 100644 --- a/src/lib/staged_ledger/staged_ledger.ml +++ b/src/lib/staged_ledger/staged_ledger.ml @@ -820,8 +820,9 @@ module T = struct ~f:(fun _ -> check (List.drop data (fst partitions.first)) partitions) partitions.second - let update_coinbase_stack_and_get_data ~logger ~constraint_constants - ~global_slot scan_state ledger pending_coinbase_collection transactions + let update_coinbase_stack_and_get_data_impl ~logger ~constraint_constants + ~global_slot ~first_partition_slots:slots ~no_second_partition + ~is_new_stack ledger pending_coinbase_collection transactions current_state_view state_and_body_hash = let open Deferred.Result.Let_syntax in let coinbase_exists txns = @@ -834,101 +835,104 @@ module T = struct Continue acc ) ~finish:Fn.id in + if no_second_partition then ( + (*Single partition: + 1.Check if a new stack is required and get a working stack [working_stack] + 2.create data for enqueuing onto the scan state *) + let%bind working_stack = + working_stack pending_coinbase_collection ~is_new_stack + |> Deferred.return + in + [%log internal] "Update_ledger_and_get_statements" + ~metadata:[ ("partition", `String "single") ] ; + let%map data, updated_stack, _, first_pass_ledger_end = + update_ledger_and_get_statements ~constraint_constants ~global_slot + ledger working_stack (transactions, None) current_state_view + 
state_and_body_hash + in + [%log internal] "Update_ledger_and_get_statements_done" ; + [%log internal] "Update_coinbase_stack_done" + ~metadata: + [ ("is_new_stack", `Bool is_new_stack) + ; ("transactions_len", `Int (List.length transactions)) + ; ("data_len", `Int (List.length data)) + ] ; + ( is_new_stack + , data + , Pending_coinbase.Update.Action.Update_one + , `Update_one updated_stack + , `First_pass_ledger_end first_pass_ledger_end ) ) + else + (*Two partitions: + Assumption: Only one of the partitions will have coinbase transaction(s) in it. + 1. Get the latest stack for coinbase in the first set of transactions + 2. get the first set of scan_state data[data1] + 3. get a new stack for the second partition because the second set of transactions would start from the beginning of the next tree in the scan_state + 4. Initialize the new stack with the state from the first stack + 5. get the second set of scan_state data[data2]*) + let txns_for_partition1 = List.take transactions slots in + let coinbase_in_first_partition = coinbase_exists txns_for_partition1 in + let%bind working_stack1 = + working_stack pending_coinbase_collection ~is_new_stack:false + |> Deferred.return + in + let txns_for_partition2 = List.drop transactions slots in + [%log internal] "Update_ledger_and_get_statements" + ~metadata:[ ("partition", `String "both") ] ; + let%map data, updated_stack1, updated_stack2, first_pass_ledger_end = + update_ledger_and_get_statements ~constraint_constants ~global_slot + ledger working_stack1 + (txns_for_partition1, Some txns_for_partition2) + current_state_view state_and_body_hash + in + [%log internal] "Update_ledger_and_get_statements_done" ; + let second_has_data = List.length txns_for_partition2 > 0 in + let pending_coinbase_action, stack_update = + match (coinbase_in_first_partition, second_has_data) with + | true, true -> + ( Pending_coinbase.Update.Action.Update_two_coinbase_in_first + , `Update_two (updated_stack1, updated_stack2) ) + (*updated_stack2 
does not have coinbase but has the state from the previous stack*) + | true, false -> + (*updated_stack1 has some new coinbase but partition 2 has no + data and so we have only one stack to update*) + (Update_one, `Update_one updated_stack1) + | false, true -> + (*updated_stack1 just has the new state. [updated_stack2] might have coinbase, definitely has some + data and therefore will have a non-dummy state.*) + ( Update_two_coinbase_in_second + , `Update_two (updated_stack1, updated_stack2) ) + | false, false -> + (* a diff consists of only non-coinbase transactions. This is currently not possible because a diff will have a coinbase at the very least, so don't update anything?*) + (Update_none, `Update_none) + in + [%log internal] "Update_coinbase_stack_done" + ~metadata: + [ ("is_new_stack", `Bool false) + ; ("coinbase_in_first_partition", `Bool coinbase_in_first_partition) + ; ("second_has_data", `Bool second_has_data) + ; ("txns_for_partition1_len", `Int (List.length txns_for_partition1)) + ; ("txns_for_partition2_len", `Int (List.length txns_for_partition2)) + ] ; + ( false + , data + , pending_coinbase_action + , stack_update + , `First_pass_ledger_end first_pass_ledger_end ) + + let update_coinbase_stack_and_get_data ~logger ~constraint_constants + ~global_slot scan_state ledger pending_coinbase_collection transactions + current_state_view state_and_body_hash = let { Scan_state.Space_partition.first = slots, _; second } = Scan_state.partition_if_overflowing scan_state in
"single") ] ; - let%map data, updated_stack, _, first_pass_ledger_end = - update_ledger_and_get_statements ~constraint_constants ~global_slot - ledger working_stack (transactions, None) current_state_view - state_and_body_hash - in - [%log internal] "Update_ledger_and_get_statements_done" ; - [%log internal] "Update_coinbase_stack_done" - ~metadata: - [ ("is_new_stack", `Bool is_new_stack) - ; ("transactions_len", `Int (List.length transactions)) - ; ("data_len", `Int (List.length data)) - ] ; - ( is_new_stack - , data - , Pending_coinbase.Update.Action.Update_one - , `Update_one updated_stack - , `First_pass_ledger_end first_pass_ledger_end ) - | Some _ -> - (*Two partition: - Assumption: Only one of the partition will have coinbase transaction(s)in it. - 1. Get the latest stack for coinbase in the first set of transactions - 2. get the first set of scan_state data[data1] - 3. get a new stack for the second partion because the second set of transactions would start from the begining of the next tree in the scan_state - 4. Initialize the new stack with the state from the first stack - 5. 
get the second set of scan_state data[data2]*) - let txns_for_partition1 = List.take transactions slots in - let coinbase_in_first_partition = - coinbase_exists txns_for_partition1 - in - let%bind working_stack1 = - working_stack pending_coinbase_collection ~is_new_stack:false - |> Deferred.return - in - let txns_for_partition2 = List.drop transactions slots in - [%log internal] "Update_ledger_and_get_statements" - ~metadata:[ ("partition", `String "both") ] ; - let%map data, updated_stack1, updated_stack2, first_pass_ledger_end = - update_ledger_and_get_statements ~constraint_constants ~global_slot - ledger working_stack1 - (txns_for_partition1, Some txns_for_partition2) - current_state_view state_and_body_hash - in - [%log internal] "Update_ledger_and_get_statements_done" ; - let second_has_data = List.length txns_for_partition2 > 0 in - let pending_coinbase_action, stack_update = - match (coinbase_in_first_partition, second_has_data) with - | true, true -> - ( Pending_coinbase.Update.Action.Update_two_coinbase_in_first - , `Update_two (updated_stack1, updated_stack2) ) - (*updated_stack2 does not have coinbase and but has the state from the previous stack*) - | true, false -> - (*updated_stack1 has some new coinbase but parition 2 has no - data and so we have only one stack to update*) - (Update_one, `Update_one updated_stack1) - | false, true -> - (*updated_stack1 just has the new state. [updated stack2] might have coinbase, definitely has some - data and therefore will have a non-dummy state.*) - ( Update_two_coinbase_in_second - , `Update_two (updated_stack1, updated_stack2) ) - | false, false -> - (* a diff consists of only non-coinbase transactions. 
This is currently not possible because a diff will have a coinbase at the very least, so don't update anything?*) - (Update_none, `Update_none) - in - [%log internal] "Update_coinbase_stack_done" - ~metadata: - [ ("is_new_stack", `Bool false) - ; ( "coinbase_in_first_partition" - , `Bool coinbase_in_first_partition ) - ; ("second_has_data", `Bool second_has_data) - ; ( "txns_for_partition1_len" - , `Int (List.length txns_for_partition1) ) - ; ( "txns_for_partition2_len" - , `Int (List.length txns_for_partition2) ) - ] ; - ( false - , data - , pending_coinbase_action - , stack_update - , `First_pass_ledger_end first_pass_ledger_end ) ) + let is_new_stack = Scan_state.next_on_new_tree scan_state in + if not @@ List.is_empty transactions then + update_coinbase_stack_and_get_data_impl ~logger ~constraint_constants + ~global_slot ~first_partition_slots:slots + ~no_second_partition:(Option.is_none second) ~is_new_stack ledger + pending_coinbase_collection transactions current_state_view + state_and_body_hash else ( [%log internal] "Update_coinbase_stack_done" ; Deferred.return @@ -2343,6 +2347,9 @@ module Test_helpers = struct let dummy_state_view ?global_slot () = dummy_state_and_view ?global_slot () |> snd + + let update_coinbase_stack_and_get_data_impl = + update_coinbase_stack_and_get_data_impl end let%test_module "staged ledger tests" = diff --git a/src/lib/staged_ledger/staged_ledger.mli b/src/lib/staged_ledger/staged_ledger.mli index 90181dea908..805314908cb 100644 --- a/src/lib/staged_ledger/staged_ledger.mli +++ b/src/lib/staged_ledger/staged_ledger.mli @@ -77,7 +77,7 @@ module Scan_state : sig -> Mina_state.Protocol_state.value State_hash.With_state_hashes.t list Or_error.t - (** Apply transactions corresponding to the last emitted proof based on the + (** Apply transactions corresponding to the last emitted proof based on the two-pass system to get snarked ledger- first pass includes legacy transactions and zkapp payments and the second pass includes account 
updates. This ignores any account updates if a blocks transactions were split among two trees. *) val get_snarked_ledger_sync : @@ -104,7 +104,7 @@ module Scan_state : sig -> t -> unit Or_error.t - (** Apply transactions corresponding to the last emitted proof based on the + (** Apply transactions corresponding to the last emitted proof based on the two-pass system to get snarked ledger- first pass includes legacy transactions and zkapp payments and the second pass includes account updates. This ignores any account updates if a blocks transactions were split among two trees. *) val get_snarked_ledger_async : @@ -356,4 +356,28 @@ module Test_helpers : sig ?global_slot:Mina_numbers.Global_slot_since_genesis.t -> unit -> Zkapp_precondition.Protocol_state.View.t + + val update_coinbase_stack_and_get_data_impl : + logger:Logger.t + -> constraint_constants:Genesis_constants.Constraint_constants.t + -> global_slot:Mina_numbers.Global_slot_since_genesis.t + -> first_partition_slots:int + -> no_second_partition:bool + -> is_new_stack:bool + -> Ledger.t + -> Pending_coinbase.t + -> Transaction.t With_status.t list + -> Zkapp_precondition.Protocol_state.View.t + -> Frozen_ledger_hash.t * Frozen_ledger_hash.t + -> ( bool + * Transaction_snark_scan_state.Transaction_with_witness.t list + * Pending_coinbase.Update.Action.t + * [> `Update_none + | `Update_one of Pending_coinbase.Stack_versioned.t + | `Update_two of + Pending_coinbase.Stack_versioned.t + * Pending_coinbase.Stack_versioned.t ] + * [> `First_pass_ledger_end of Frozen_ledger_hash.t ] + , Staged_ledger_error.t ) + Deferred.Result.t end