From fc13c9d41e6e7144f866c5f30e07685181574f78 Mon Sep 17 00:00:00 2001 From: justanwar <42809091+justanwar@users.noreply.github.com> Date: Wed, 15 Nov 2023 01:46:45 +0800 Subject: [PATCH 01/13] Fix Github Actions (#1355) * Disable building libconsensus for Windows in CI * setuptools workaround for macOS --- .github/workflows/ci-master.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-master.yml b/.github/workflows/ci-master.yml index 6b2bb8af21..0ddad2d2b2 100644 --- a/.github/workflows/ci-master.yml +++ b/.github/workflows/ci-master.yml @@ -135,7 +135,7 @@ jobs: working-directory: ${{ env.SOURCE_ARTIFACT }} - name: Build Firo run: | - ./configure --disable-jni --enable-elysium --prefix=$(realpath depends/x86_64-w64-mingw32) + ./configure --without-libs --disable-jni --enable-elysium --prefix=$(realpath depends/x86_64-w64-mingw32) make -j$(nproc) working-directory: ${{ env.SOURCE_ARTIFACT }} - name: Prepare Files for Artifact @@ -167,6 +167,9 @@ jobs: run: sudo xcode-select -s /Applications/Xcode.app/Contents/Developer - name: Install Required Packages run: brew install automake coreutils pkg-config + # Workaround for macOS: https://github.com/actions/runner/issues/2958 + - name: Install setuptools + run: sudo -H pip install setuptools - name: Build Dependencies run: make -C depends -j$(sysctl -n hw.activecpu) working-directory: ${{ env.SOURCE_ARTIFACT }} From 3ba735dc29936b0113e057ff7ee44ad738f5210b Mon Sep 17 00:00:00 2001 From: psolstice Date: Wed, 15 Nov 2023 04:25:19 +0100 Subject: [PATCH 02/13] Fixed QT crash on startup (#1312) --- src/qt/transactionview.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 6cdf290499..bbd3e4b049 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -233,8 +233,8 @@ void TransactionView::setModel(WalletModel *_model) transactionView->setAlternatingRowColors(true); transactionView->setSelectionBehavior(QAbstractItemView::SelectRows); transactionView->setSelectionMode(QAbstractItemView::ExtendedSelection); + transactionView->horizontalHeader()->setSortIndicator(TransactionTableModel::Date, Qt::DescendingOrder); transactionView->setSortingEnabled(true); - transactionView->sortByColumn(TransactionTableModel::Date, Qt::DescendingOrder); transactionView->verticalHeader()->hide(); transactionView->setColumnWidth(TransactionTableModel::Status, STATUS_COLUMN_WIDTH); From 92a20507fb9beef489f9bd42bf27c4f21fbb9b5f Mon Sep 17 00:00:00 2001 From: Manish Kumar Date: Thu, 16 Nov 2023 06:19:33 +0530 Subject: [PATCH 03/13] Libspark fuzzing harness (#1340) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Spark address/key serialization functions implemented * CSparkWallet and mint db functions added * Spark mint tx creation, GetAvailableSparkCoins() implemented * Bind serial commitments to chain context * Use default curve generator for spend component * Completed merge * Use default address when auto minting * Adding domain separators when doing hashes * Adding version and checksum in address serialization * Saving serial_context into CSparkMintMeta * Moving Spark related code into separate directory * Embed KDF derived key size into hash instantiation * Generalize range proofs to arbitrary set sizes * Spark state implemented * More state functionality implemented * WIP: spend verification batching * Adding Spark state into mempool * Use a key-committing AEAD construction * More 
spark state functionality implemented * Spark balance proof fixed * Adds an encoding method for address scrambling * Switch from hex to `bech32m` encoding * Add tests * SpendTransaction refactored and added serialization for it * Review comment applied and fixed a bug in mint generation * Spark spend creation * Spark spend verification * More state functionality implemented and bug fixes * Some bug fixes and cleanup * Bug fixes and review comments resolved * More review comments resolved * Adding several rpc calls for spark * Adding more rpc calls * Failing unittests fixed * Version bump * Devnet Spark HF block * Added fuzz folder for persistent fuzzing libspark fuzzing * bech32 fuzzing harness for honggfuzz added * fix bech32_fuzz.cpp * Added f4grumble fuzzing harness * Forgot to add return statement to f4grumble_fuzz.cpp * Moved libspark related fuzzing to its own folder * Added FuzzedDataProvider.h * Integrated FuzzedDataProvider.h in bech32_fuzz.cpp * Fixed fuzzing enum in bech32_fuzz.cpp * missing semicolon * Added fuzz folder for persistent fuzzing libspark fuzzing bech32 fuzzing harness for honggfuzz added Added f4grumble fuzzing harness Revert "missing semicolon" This reverts commit 330cff61bd2ed9cefa0720a5b2ce5b356bfff777. Fuzzing directory cleanup Added FuzzedDataProvider.h * Added fuzzing utilities for group elements and scalars * Added fuzzing for aead.cpp * Added vector versions of GetGroupElement and GetScalar in fuzzing_utilities.cpp * Added single proof fuzz test for bpplus_fuzz * LelantusToSpark function implemented * Refactoring of file paths * lelantustospark rpc name refactored * Adding check to stop lelantus on consensus level * Added fuzz folder for persistent fuzzing libspark fuzzing bech32 fuzzing harness for honggfuzz added Added f4grumble fuzzing harness Revert "missing semicolon" This reverts commit 330cff61bd2ed9cefa0720a5b2ce5b356bfff777. Fuzzing directory cleanup Added FuzzedDataProvider.h * Added fuzzing utilities for group elements and scalars * Added fuzzing for aead.cpp * Added vector versions of GetGroupElement and GetScalar in fuzzing_utilities.cpp * Added single proof fuzz test for bpplus_fuzz * Changed relative paths of dependencies in secp256k1/src/cpp * More relative path changes in src/secp256k1 * Changed relative paths in crypto/common.h * Removed FuzzedSecp256k1Object deconstructor * Addedd aead_fuzz.cpp and other utilities Adding check to stop lelantus on consensus level Added fuzz folder for persistent fuzzing libspark fuzzing bech32 fuzzing harness for honggfuzz added Added f4grumble fuzzing harness Revert "missing semicolon" This reverts commit 330cff61bd2ed9cefa0720a5b2ce5b356bfff777. 
Fuzzing directory cleanup Added FuzzedDataProvider.h Added fuzzing utilities for group elements and scalars Added fuzzing for aead.cpp * Added vector versions of GetGroupElement and GetScalar in fuzzing_utilities.cpp * Added single proof fuzz test for bpplus_fuzz * Changed relative paths of dependencies in secp256k1/src/cpp * More relative path changes in src/secp256k1 * Changed relative paths in crypto/common.h * Removed FuzzedSecp256k1Object deconstructor * Added batch bpplus proofs fuzzing tests * Changed relative paths in libspark/params.h * Fixed errors in bpplus_fuzz.cpp * Fixed more errors * More fixes in bpplus.cpp * Completed bpplus_fuzz tests Added batch bpplus proofs fuzzing tests Changed relative paths in libspark/params.h Fixed errors in bpplus_fuzz.cpp Fixed more errors More fixes in bpplus.cpp Changed type of sizes in bpplus_fuzz.cpp * Added chaum_fuzz.cpp * Changed relative paths in src/wallet/crypter.h * Changed relative path in arith_uint256.h exit * Completed chaum fuzz tests Added chaum_fuzz.cpp Changed relative paths in src/wallet/crypter.h Changed relative path in arith_uint256.h exit * Added schnorr_fuzz.cpp * Addedd grootle_fuzz.cpp * Added length to ConsumeBytes call * Initialized sizes vector via a loop in grootle_fuzz.cpp * Changed relative paths in support/allocator/secure.h * Changed relative paths in support/allocator/secure.h again * Added mint_transaction_fuzz.cpp * Changed type of parameter in MintTransaction in mint_transaction_fuzz.cpp * Changed relative path in params.cpp * Changed relative paths in consensus/params.h * Changed relative paths in primitives/block.h * Changed relative paths in primitives/transaction.h * Changed relative paths in src/script/script.h * Changed more relative paths in src/script/script.h * Changed relative paths in crypto/MerkleTreeProof/mtp.h * Changed relative paths in crypto/MerkleTreeProof/mtp.h. Actually this time. * Changed relative paths in crypto/progpow.h * Completed mint_transaction_fuzz.cpp Added mint_transaction_fuzz.cpp Changed type of parameter in MintTransaction in mint_transaction_fuzz.cpp Changed relative path in params.cpp Changed relative paths in consensus/params.h Changed relative paths in primitives/block.h Changed relative paths in primitives/transaction.h Changed relative paths in src/script/script.h Changed more relative paths in src/script/script.h Changed relative paths in crypto/MerkleTreeProof/mtp.h Changed relative paths in crypto/MerkleTreeProof/mtp.h. Actually this time. 
Changed relative paths in crypto/progpow.h * Added spend_transaction_fuzz.cpp * Fixed errors in spend_transaction_fuzz.cpp * Completed spend_transaction_fuzz.cpp Added spend_transaction_fuzz.cpp Fixed errors in spend_transaction_fuzz.cpp * Updated relative paths in MultiExponent.cpp * fuzzing and coverage of different libspark files * makefile and code-coverage for libspark * spend transaction modified * coverage, input, results removed * empty and temporary files deleted * empty files removed * readme for fuzzing modified * original paths restored and conflict resolved, flags added in fuzz makefile * path fixed and merged latest spark * path fixed * instructions for installing dependencies added in fuzz readme * binary removed and loop length resized * readme modified & vetor size reserved in fuzzing_utilities --------- Co-authored-by: levonpetrosyan93 Co-authored-by: Aaron Feickert <66188213+AaronFeickert@users.noreply.github.com> Co-authored-by: Mikerah Co-authored-by: Onur İnanç Co-authored-by: HashCloak --- src/crypto/progpow.h | 2 +- src/fuzz/FuzzedDataProvider.h | 398 +++++++++++++++++++ src/fuzz/Makefile | 134 +++++++ src/fuzz/README.md | 142 +++++++ src/fuzz/fuzzing_utilities.cpp | 89 +++++ src/fuzz/fuzzing_utilities.h | 23 ++ src/fuzz/generate_coverage.sh | 21 + src/fuzz/libspark/aead_fuzz.cpp | 24 ++ src/fuzz/libspark/aead_fuzz_random_key.cpp | 24 ++ src/fuzz/libspark/bech32_fuzz.cpp | 46 +++ src/fuzz/libspark/bech32_fuzz_2.cpp | 62 +++ src/fuzz/libspark/bpplus_fuzz.cpp | 112 ++++++ src/fuzz/libspark/chaum_fuzz.cpp | 229 +++++++++++ src/fuzz/libspark/chaum_fuzz_2.cpp | 145 +++++++ src/fuzz/libspark/chaum_fuzz_member.cpp | 143 +++++++ src/fuzz/libspark/coin_fuzz.cpp | 72 ++++ src/fuzz/libspark/f4grumble_fuzz.cpp | 62 +++ src/fuzz/libspark/grootle_fuzz.cpp | 89 +++++ src/fuzz/libspark/grootle_fuzz_member.cpp | 90 +++++ src/fuzz/libspark/mint_transaction_fuzz.cpp | 34 ++ src/fuzz/libspark/schnorr_fuzz.cpp | 95 +++++ src/fuzz/libspark/spend_transaction_fuzz.cpp | 110 +++++ 22 files changed, 2145 insertions(+), 1 deletion(-) create mode 100644 src/fuzz/FuzzedDataProvider.h create mode 100644 src/fuzz/Makefile create mode 100644 src/fuzz/README.md create mode 100644 src/fuzz/fuzzing_utilities.cpp create mode 100644 src/fuzz/fuzzing_utilities.h create mode 100755 src/fuzz/generate_coverage.sh create mode 100644 src/fuzz/libspark/aead_fuzz.cpp create mode 100644 src/fuzz/libspark/aead_fuzz_random_key.cpp create mode 100644 src/fuzz/libspark/bech32_fuzz.cpp create mode 100644 src/fuzz/libspark/bech32_fuzz_2.cpp create mode 100644 src/fuzz/libspark/bpplus_fuzz.cpp create mode 100644 src/fuzz/libspark/chaum_fuzz.cpp create mode 100644 src/fuzz/libspark/chaum_fuzz_2.cpp create mode 100644 src/fuzz/libspark/chaum_fuzz_member.cpp create mode 100644 src/fuzz/libspark/coin_fuzz.cpp create mode 100644 src/fuzz/libspark/f4grumble_fuzz.cpp create mode 100644 src/fuzz/libspark/grootle_fuzz.cpp create mode 100644 src/fuzz/libspark/grootle_fuzz_member.cpp create mode 100644 src/fuzz/libspark/mint_transaction_fuzz.cpp create mode 100644 src/fuzz/libspark/schnorr_fuzz.cpp create mode 100644 src/fuzz/libspark/spend_transaction_fuzz.cpp diff --git a/src/crypto/progpow.h b/src/crypto/progpow.h index ba484d287d..6b1f7b5e4a 100644 --- a/src/crypto/progpow.h +++ b/src/crypto/progpow.h @@ -45,4 +45,4 @@ uint256 progpow_hash_full(const CProgPowHeader& header, uint256& mix_hash); /* Performs a light progpow hash (DAG loops excluded) provided header has mix_hash */ uint256 progpow_hash_light(const 
CProgPowHeader& header); -#endif // FIRO_PROGPOW_H +#endif // FIRO_PROGPOW_H \ No newline at end of file diff --git a/src/fuzz/FuzzedDataProvider.h b/src/fuzz/FuzzedDataProvider.h new file mode 100644 index 0000000000..9f66afc9e7 --- /dev/null +++ b/src/fuzz/FuzzedDataProvider.h @@ -0,0 +1,398 @@ +//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// A single header library providing an utility class to break up an array of +// bytes. Whenever run on the same input, provides the same output, as long as +// its methods are called in the same order, with the same arguments. +//===----------------------------------------------------------------------===// + +#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ +#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// In addition to the comments below, the API is also briefly documented at +// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider +class FuzzedDataProvider { + public: + // |data| is an array of length |size| that the FuzzedDataProvider wraps to + // provide more granular access. |data| must outlive the FuzzedDataProvider. + FuzzedDataProvider(const uint8_t *data, size_t size) + : data_ptr_(data), remaining_bytes_(size) {} + ~FuzzedDataProvider() = default; + + // See the implementation below (after the class definition) for more verbose + // comments for each of the methods. + + // Methods returning std::vector of bytes. These are the most popular choice + // when splitting fuzzing input into pieces, as every piece is put into a + // separate buffer (i.e. ASan would catch any under-/overflow) and the memory + // will be released automatically. + template std::vector ConsumeBytes(size_t num_bytes); + template + std::vector ConsumeBytesWithTerminator(size_t num_bytes, T terminator = 0); + template std::vector ConsumeRemainingBytes(); + + // Methods returning strings. Use only when you need a std::string or a null + // terminated C-string. Otherwise, prefer the methods returning std::vector. + std::string ConsumeBytesAsString(size_t num_bytes); + std::string ConsumeRandomLengthString(size_t max_length); + std::string ConsumeRandomLengthString(); + std::string ConsumeRemainingBytesAsString(); + + // Methods returning integer values. + template T ConsumeIntegral(); + template T ConsumeIntegralInRange(T min, T max); + + // Methods returning floating point values. + template T ConsumeFloatingPoint(); + template T ConsumeFloatingPointInRange(T min, T max); + + // 0 <= return value <= 1. + template T ConsumeProbability(); + + bool ConsumeBool(); + + // Returns a value chosen from the given enum. + template T ConsumeEnum(); + + // Returns a value from the given array. + template T PickValueInArray(const T (&array)[size]); + template + T PickValueInArray(const std::array &array); + template T PickValueInArray(std::initializer_list list); + + // Writes data to the given destination and returns number of bytes written. + size_t ConsumeData(void *destination, size_t num_bytes); + + // Reports the remaining bytes available for fuzzed input. 
+ size_t remaining_bytes() { return remaining_bytes_; } + + private: + FuzzedDataProvider(const FuzzedDataProvider &) = delete; + FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete; + + void CopyAndAdvance(void *destination, size_t num_bytes); + + void Advance(size_t num_bytes); + + template + std::vector ConsumeBytes(size_t size, size_t num_bytes); + + template TS ConvertUnsignedToSigned(TU value); + + const uint8_t *data_ptr_; + size_t remaining_bytes_; +}; + +// Returns a std::vector containing |num_bytes| of input data. If fewer than +// |num_bytes| of data remain, returns a shorter std::vector containing all +// of the data that's left. Can be used with any byte sized type, such as +// char, unsigned char, uint8_t, etc. +template +std::vector FuzzedDataProvider::ConsumeBytes(size_t num_bytes) { + num_bytes = std::min(num_bytes, remaining_bytes_); + return ConsumeBytes(num_bytes, num_bytes); +} + +// Similar to |ConsumeBytes|, but also appends the terminator value at the end +// of the resulting vector. Useful, when a mutable null-terminated C-string is +// needed, for example. But that is a rare case. Better avoid it, if possible, +// and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods. +template +std::vector FuzzedDataProvider::ConsumeBytesWithTerminator(size_t num_bytes, + T terminator) { + num_bytes = std::min(num_bytes, remaining_bytes_); + std::vector result = ConsumeBytes(num_bytes + 1, num_bytes); + result.back() = terminator; + return result; +} + +// Returns a std::vector containing all remaining bytes of the input data. +template +std::vector FuzzedDataProvider::ConsumeRemainingBytes() { + return ConsumeBytes(remaining_bytes_); +} + +// Returns a std::string containing |num_bytes| of input data. Using this and +// |.c_str()| on the resulting string is the best way to get an immutable +// null-terminated C string. If fewer than |num_bytes| of data remain, returns +// a shorter std::string containing all of the data that's left. +inline std::string FuzzedDataProvider::ConsumeBytesAsString(size_t num_bytes) { + static_assert(sizeof(std::string::value_type) == sizeof(uint8_t), + "ConsumeBytesAsString cannot convert the data to a string."); + + num_bytes = std::min(num_bytes, remaining_bytes_); + std::string result( + reinterpret_cast(data_ptr_), num_bytes); + Advance(num_bytes); + return result; +} + +// Returns a std::string of length from 0 to |max_length|. When it runs out of +// input data, returns what remains of the input. Designed to be more stable +// with respect to a fuzzer inserting characters than just picking a random +// length and then consuming that many bytes with |ConsumeBytes|. +inline std::string +FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) { + // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\" + // followed by anything else to the end of the string. As a result of this + // logic, a fuzzer can insert characters into the string, and the string + // will be lengthened to include those new characters, resulting in a more + // stable fuzzer than picking the length of a string independently from + // picking its contents. + std::string result; + + // Reserve the anticipated capaticity to prevent several reallocations. 
+ result.reserve(std::min(max_length, remaining_bytes_)); + for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) { + char next = ConvertUnsignedToSigned(data_ptr_[0]); + Advance(1); + if (next == '\\' && remaining_bytes_ != 0) { + next = ConvertUnsignedToSigned(data_ptr_[0]); + Advance(1); + if (next != '\\') + break; + } + result += next; + } + + result.shrink_to_fit(); + return result; +} + +// Returns a std::string of length from 0 to |remaining_bytes_|. +inline std::string FuzzedDataProvider::ConsumeRandomLengthString() { + return ConsumeRandomLengthString(remaining_bytes_); +} + +// Returns a std::string containing all remaining bytes of the input data. +// Prefer using |ConsumeRemainingBytes| unless you actually need a std::string +// object. +inline std::string FuzzedDataProvider::ConsumeRemainingBytesAsString() { + return ConsumeBytesAsString(remaining_bytes_); +} + +// Returns a number in the range [Type's min, Type's max]. The value might +// not be uniformly distributed in the given range. If there's no input data +// left, always returns |min|. +template T FuzzedDataProvider::ConsumeIntegral() { + return ConsumeIntegralInRange(std::numeric_limits::min(), + std::numeric_limits::max()); +} + +// Returns a number in the range [min, max] by consuming bytes from the +// input data. The value might not be uniformly distributed in the given +// range. If there's no input data left, always returns |min|. |min| must +// be less than or equal to |max|. +template +T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) { + static_assert(std::is_integral::value, "An integral type is required."); + static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type."); + + if (min > max) + abort(); + + // Use the biggest type possible to hold the range and the result. + uint64_t range = static_cast(max) - min; + uint64_t result = 0; + size_t offset = 0; + + while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 && + remaining_bytes_ != 0) { + // Pull bytes off the end of the seed data. Experimentally, this seems to + // allow the fuzzer to more easily explore the input space. This makes + // sense, since it works by modifying inputs that caused new code to run, + // and this data is often used to encode length of data read by + // |ConsumeBytes|. Separating out read lengths makes it easier modify the + // contents of the data that is actually read. + --remaining_bytes_; + result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_]; + offset += CHAR_BIT; + } + + // Avoid division by 0, in case |range + 1| results in overflow. + if (range != std::numeric_limits::max()) + result = result % (range + 1); + + return static_cast(min + result); +} + +// Returns a floating point value in the range [Type's lowest, Type's max] by +// consuming bytes from the input data. If there's no input data left, always +// returns approximately 0. +template T FuzzedDataProvider::ConsumeFloatingPoint() { + return ConsumeFloatingPointInRange(std::numeric_limits::lowest(), + std::numeric_limits::max()); +} + +// Returns a floating point value in the given range by consuming bytes from +// the input data. If there's no input data left, returns |min|. Note that +// |min| must be less than or equal to |max|. 
+template +T FuzzedDataProvider::ConsumeFloatingPointInRange(T min, T max) { + if (min > max) + abort(); + + T range = .0; + T result = min; + constexpr T zero(.0); + if (max > zero && min < zero && max > min + std::numeric_limits::max()) { + // The diff |max - min| would overflow the given floating point type. Use + // the half of the diff as the range and consume a bool to decide whether + // the result is in the first of the second part of the diff. + range = (max / 2.0) - (min / 2.0); + if (ConsumeBool()) { + result += range; + } + } else { + range = max - min; + } + + return result + range * ConsumeProbability(); +} + +// Returns a floating point number in the range [0.0, 1.0]. If there's no +// input data left, always returns 0. +template T FuzzedDataProvider::ConsumeProbability() { + static_assert(std::is_floating_point::value, + "A floating point type is required."); + + // Use different integral types for different floating point types in order + // to provide better density of the resulting values. + using IntegralType = + typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t, + uint64_t>::type; + + T result = static_cast(ConsumeIntegral()); + result /= static_cast(std::numeric_limits::max()); + return result; +} + +// Reads one byte and returns a bool, or false when no data remains. +inline bool FuzzedDataProvider::ConsumeBool() { + return 1 & ConsumeIntegral(); +} + +// Returns an enum value. The enum must start at 0 and be contiguous. It must +// also contain |kMaxValue| aliased to its largest (inclusive) value. Such as: +// enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue }; +template T FuzzedDataProvider::ConsumeEnum() { + static_assert(std::is_enum::value, "|T| must be an enum type."); + return static_cast( + ConsumeIntegralInRange(0, static_cast(T::kMaxValue))); +} + +// Returns a copy of the value selected from the given fixed-size |array|. +template +T FuzzedDataProvider::PickValueInArray(const T (&array)[size]) { + static_assert(size > 0, "The array must be non empty."); + return array[ConsumeIntegralInRange(0, size - 1)]; +} + +template +T FuzzedDataProvider::PickValueInArray(const std::array &array) { + static_assert(size > 0, "The array must be non empty."); + return array[ConsumeIntegralInRange(0, size - 1)]; +} + +template +T FuzzedDataProvider::PickValueInArray(std::initializer_list list) { + // TODO(Dor1s): switch to static_assert once C++14 is allowed. + if (!list.size()) + abort(); + + return *(list.begin() + ConsumeIntegralInRange(0, list.size() - 1)); +} + +// Writes |num_bytes| of input data to the given destination pointer. If there +// is not enough data left, writes all remaining bytes. Return value is the +// number of bytes written. +// In general, it's better to avoid using this function, but it may be useful +// in cases when it's necessary to fill a certain buffer or object with +// fuzzing data. +inline size_t FuzzedDataProvider::ConsumeData(void *destination, + size_t num_bytes) { + num_bytes = std::min(num_bytes, remaining_bytes_); + CopyAndAdvance(destination, num_bytes); + return num_bytes; +} + +// Private methods. 
+inline void FuzzedDataProvider::CopyAndAdvance(void *destination, + size_t num_bytes) { + std::memcpy(destination, data_ptr_, num_bytes); + Advance(num_bytes); +} + +inline void FuzzedDataProvider::Advance(size_t num_bytes) { + if (num_bytes > remaining_bytes_) + abort(); + + data_ptr_ += num_bytes; + remaining_bytes_ -= num_bytes; +} + +template +std::vector FuzzedDataProvider::ConsumeBytes(size_t size, size_t num_bytes) { + static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type."); + + // The point of using the size-based constructor below is to increase the + // odds of having a vector object with capacity being equal to the length. + // That part is always implementation specific, but at least both libc++ and + // libstdc++ allocate the requested number of bytes in that constructor, + // which seems to be a natural choice for other implementations as well. + // To increase the odds even more, we also call |shrink_to_fit| below. + std::vector result(size); + if (size == 0) { + if (num_bytes != 0) + abort(); + return result; + } + + CopyAndAdvance(result.data(), num_bytes); + + // Even though |shrink_to_fit| is also implementation specific, we expect it + // to provide an additional assurance in case vector's constructor allocated + // a buffer which is larger than the actual amount of data we put inside it. + result.shrink_to_fit(); + return result; +} + +template +TS FuzzedDataProvider::ConvertUnsignedToSigned(TU value) { + static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types."); + static_assert(!std::numeric_limits::is_signed, + "Source type must be unsigned."); + + // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream. + if (std::numeric_limits::is_modulo) + return static_cast(value); + + // Avoid using implementation-defined unsigned to signed conversions. + // To learn more, see https://stackoverflow.com/questions/13150449. 
+ if (value <= std::numeric_limits::max()) { + return static_cast(value); + } else { + constexpr auto TS_min = std::numeric_limits::min(); + return TS_min + static_cast(value - TS_min); + } +} + +#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ + diff --git a/src/fuzz/Makefile b/src/fuzz/Makefile new file mode 100644 index 0000000000..a2d2979e79 --- /dev/null +++ b/src/fuzz/Makefile @@ -0,0 +1,134 @@ +CXX := hfuzz-clang++ + +CXXFLAGS := -DHAVE_CONFIG_H -I../../src/ -iquote ../../src/config/ -iquote ../secp256k1/ -iquote ../secp256k1/src/ -iquote ../secp256k1/include/ +CXXFLAGS2 := -DHAVE_CONFIG_H + +LIBS := -lcrypto -lstdc++ -lboost_thread -lboost_filesystem -lboost_program_options -lboost_chrono +LIBS2 := -lstdc++ -lcrypto + +INCLUDE_HEADER := -include ../streams.h -include ../version.h + +BPPLUS_SRCS := libspark/bpplus_fuzz.cpp ../libspark/bpplus.cpp ../libspark/util.cpp fuzzing_utilities.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../crypto/aes.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../util.cpp ../utiltime.cpp ../utilstrencodings.cpp ../random.cpp ../chainparamsbase.cpp +BPPLUS_OUTPUT := libspark/bpplus_hfuzz +BPPLUS_OUTPUT_DEBUG := libspark/bpplus_debug + +BECH32_SRCS := libspark/bech32_fuzz_2.cpp ../libspark/bech32.cpp +BECH32_OUTPUT := libspark/bech32_hfuzz +BECH32_OUTPUT_DEBUG := libspark/bech32_debug + +AEAD_SRCS := libspark/aead_fuzz.cpp ../libspark/aead.cpp ../libspark/util.cpp ../libspark/kdf.cpp ../libspark/hash.cpp ../fuzz/fuzzing_utilities.cpp ../crypto/aes.cpp ../support/lockedpool.cpp ../support/cleanse.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp +AEAD_OUTPUT := libspark/aead_hfuzz +AEAD_OUTPUT_DEBUG := libspark/aead_debug + +GROOTLE_SRCS := libspark/grootle_fuzz.cpp ../libspark/grootle.cpp ../libspark/util.cpp fuzzing_utilities.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../crypto/aes.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../util.cpp ../utiltime.cpp ../utilstrencodings.cpp ../random.cpp ../chainparamsbase.cpp +GROOTLE_OUTPUT := libspark/grootle_hfuzz +GROOTLE_OUTPUT_DEBUG := libspark/grootle_debug + +CHAUM_SRCS := libspark/chaum_fuzz.cpp ../libspark/chaum.cpp ../libspark/transcript.cpp fuzzing_utilities.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp +CHAUM_OUTPUT := libspark/chaum_hfuzz +CHAUM_OUTPUT_DEBUG := libspark/chaum_debug + +SCHNORR_SRCS := libspark/schnorr_fuzz.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../libspark/transcript.cpp ../support/cleanse.cpp +SCHNORR_OUTPUT := libspark/schnorr_hfuzz +SCHNORR_OUTPUT_DEBUG := libspark/schnorr_debug + +COIN_SRCS := libspark/coin_fuzz.cpp ../libspark/coin.cpp ../libspark/params.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/*.cpp ../uint256.cpp ../utilstrencodings.cpp fuzzing_utilities.cpp ../libspark/aead.cpp ../libspark/util.cpp ../libspark/keys.cpp ../libspark/f4grumble.cpp ../libspark/hash.cpp 
../libspark/bech32.cpp ../libspark/kdf.cpp +COIN_OUTPUT := libspark/coin_hfuzz +COIN_OUTPUT_DEBUG := libspark/coin_debug + +MINT_TRANSACTION_SRCS := libspark/mint_transaction_fuzz.cpp ../libspark/mint_transaction.cpp ../libspark/coin.cpp ../libspark/keys.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../libspark/util.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../libspark/f4grumble.cpp ../libspark/params.cpp ../libspark/bech32.cpp ../libspark/aead.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../uint256.cpp ../utilstrencodings.cpp +MINT_TRANSACTION_OUTPUT := libspark/mint_transaction_hfuzz +MINT_TRANSACTION_OUTPUT_DEBUG := libspark/mint_transaction_debug + +SPEND_TRANSACTION_SRCS := libspark/spend_transaction_fuzz.cpp ../libspark/spend_transaction.cpp ../libspark/coin.cpp ../libspark/keys.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../libspark/util.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../libspark/f4grumble.cpp ../libspark/params.cpp ../libspark/bech32.cpp ../libspark/aead.cpp ../libspark/chaum.cpp ../libspark/bpplus.cpp ../libspark/grootle.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../uint256.cpp ../utilstrencodings.cpp ../util.cpp ../utiltime.cpp ../chainparamsbase.cpp ../random.cpp +SPEND_TRANSACTION_OUTPUT := libspark/spend_transaction_hfuzz +SPEND_TRANSACTION_OUTPUT_DEBUG := libspark/spend_transaction_debug + +F4GRUMBLE_SRCS := libspark/f4grumble_fuzz.cpp ../libspark/f4grumble.cpp ../libspark/util.cpp ../libspark/kdf.cpp ../libspark/hash.cpp ../crypto/aes.cpp ../support/lockedpool.cpp ../support/cleanse.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp +F4GRUMBLE_OUTPUT := libspark/f4grumble_hfuzz +F4GRUMBLE_OUTPUT_DEBUG := libspark/f4grumble_debug + +DEBUG_FLAGS := -g -O0 -ggdb + +bpplus: $(BPPLUS_OUTPUT) +$(BPPLUS_OUTPUT): $(BPPLUS_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +bpplus_debug: $(BPPLUS_OUTPUT_DEBUG) +$(BPPLUS_OUTPUT_DEBUG): $(BPPLUS_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +bech32: $(BECH32_OUTPUT) +$(BECH32_OUTPUT): $(BECH32_SRCS) + $(CXX) $(CXXFLAGS2) $^ -o $@ $(LIBS2) + +bech32_debug: $(BECH32_OUTPUT_DEBUG) +$(BECH32_OUTPUT_DEBUG): $(BECH32_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(LIBS2) + +aead: $(AEAD_OUTPUT) +$(AEAD_OUTPUT): $(AEAD_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS2) + +aead_debug: $(AEAD_OUTPUT_DEBUG) +$(AEAD_OUTPUT_DEBUG): $(AEAD_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS2) + +grootle: $(GROOTLE_OUTPUT) +$(GROOTLE_OUTPUT): $(GROOTLE_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +grootle_debug: $(GROOTLE_OUTPUT_DEBUG) +$(GROOTLE_OUTPUT_DEBUG): $(GROOTLE_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +chaum: $(CHAUM_OUTPUT) +$(CHAUM_OUTPUT): $(CHAUM_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +chaum_debug: $(CHAUM_OUTPUT_DEBUG) +$(CHAUM_OUTPUT_DEBUG): $(CHAUM_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +schnorr: $(SCHNORR_OUTPUT) +$(SCHNORR_OUTPUT): $(SCHNORR_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +schnorr_debug: $(SCHNORR_OUTPUT_DEBUG) 
+$(SCHNORR_OUTPUT_DEBUG): $(SCHNORR_SRCS)
+	$(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS)
+
+coin: $(COIN_OUTPUT)
+$(COIN_OUTPUT): $(COIN_SRCS)
+	$(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS)
+
+coin_debug: $(COIN_OUTPUT_DEBUG)
+$(COIN_OUTPUT_DEBUG): $(COIN_SRCS)
+	$(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS)
+
+mint_transaction: $(MINT_TRANSACTION_OUTPUT)
+$(MINT_TRANSACTION_OUTPUT): $(MINT_TRANSACTION_SRCS)
+	$(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS2)
+
+mint_transaction_debug: $(MINT_TRANSACTION_OUTPUT_DEBUG)
+$(MINT_TRANSACTION_OUTPUT_DEBUG): $(MINT_TRANSACTION_SRCS)
+	$(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(LIBS2)
+
+spend_transaction: $(SPEND_TRANSACTION_OUTPUT)
+$(SPEND_TRANSACTION_OUTPUT): $(SPEND_TRANSACTION_SRCS)
+	$(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS)
+
+spend_transaction_debug: $(SPEND_TRANSACTION_OUTPUT_DEBUG)
+$(SPEND_TRANSACTION_OUTPUT_DEBUG): $(SPEND_TRANSACTION_SRCS)
+	$(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS)
+
+f4grumble: $(F4GRUMBLE_OUTPUT)
+$(F4GRUMBLE_OUTPUT): $(F4GRUMBLE_SRCS)
+	$(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS)
+
+f4grumble_debug: $(F4GRUMBLE_OUTPUT_DEBUG)
+$(F4GRUMBLE_OUTPUT_DEBUG): $(F4GRUMBLE_SRCS)
+	$(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS)
+
+clean:
+	rm -f $(BPPLUS_OUTPUT) $(BPPLUS_OUTPUT_DEBUG) $(BECH32_OUTPUT) $(BECH32_OUTPUT_DEBUG) $(AEAD_OUTPUT) $(AEAD_OUTPUT_DEBUG) $(GROOTLE_OUTPUT) $(GROOTLE_OUTPUT_DEBUG) $(CHAUM_OUTPUT) $(CHAUM_OUTPUT_DEBUG) $(SCHNORR_OUTPUT) $(SCHNORR_OUTPUT_DEBUG) $(COIN_OUTPUT) $(COIN_OUTPUT_DEBUG) $(MINT_TRANSACTION_OUTPUT) $(MINT_TRANSACTION_OUTPUT_DEBUG) $(SPEND_TRANSACTION_OUTPUT) $(SPEND_TRANSACTION_OUTPUT_DEBUG) *.o
diff --git a/src/fuzz/README.md b/src/fuzz/README.md
new file mode 100644
index 0000000000..e3c68835e9
--- /dev/null
+++ b/src/fuzz/README.md
@@ -0,0 +1,142 @@
+# Fuzzing libspark
+
+## Quickstart Guide
+To quickly get started fuzzing libspark using honggfuzz:
+
+### Build firo
+- Clone this repo:
+```
+git clone -b spark https://github.com/firoorg/firo.git
+```
+- Build firo: follow the instructions at https://github.com/firoorg/firo/tree/spark#readme
+
+Once the build is successful, install honggfuzz and the required dependencies.
+
+### Installing the fuzzer and dependencies
+- Install honggfuzz (https://github.com/google/honggfuzz)
+```
+sudo apt-get install binutils-dev libunwind-dev libblocksruntime-dev clang
+git clone https://github.com/google/honggfuzz.git
+cd honggfuzz
+make
+sudo make install
+```
+For more information you can look at https://github.com/google/honggfuzz/blob/master/docs/USAGE.md
+
+You might also need to install the following Boost and SSL dependencies in order to compile the fuzzing harnesses:
+
+```
+sudo apt install libboost-dev
+sudo apt install libssl-dev
+sudo apt install libstdc++-12-dev
+sudo apt install libboost-filesystem-dev
+sudo apt install libboost-thread-dev
+sudo apt install libboost-program-options-dev
+sudo apt install libboost-chrono-dev
+```
+
+### Fuzzing using honggfuzz
+* In order to fuzz `firo/src/libspark` using honggfuzz:
+
+```
+cd firo/src/fuzz/
+export CC=hfuzz-clang
+export CXX=hfuzz-clang++
+```
+
+To compile with `hfuzz-clang++`, inside src/fuzz run:
+
+```
+make
+```
+
+For example (for bpplus):
+```
+make bpplus
+```
+The above command will generate an instrumented binary named `<target>_hfuzz` (e.g. bpplus_hfuzz) inside src/fuzz/libspark.
+
+Fuzzing harnesses are available for the following libspark files: aead, bech32, bpplus, chaum, coin, f4grumble, grootle, mint_transaction, schnorr and spend_transaction.
+
+* To start fuzzing:
+
+1. Create directories for the input corpus and for saving all the crashes:
+```
+mkdir input crashes
+```
+2. Inside the crashes directory, run:
+```
+honggfuzz -i input -- ./libspark/<target>_hfuzz ___FILE___
+```
+
+Example:
+1. `mkdir input crashes`
+2. `cd crashes`
+3. `honggfuzz -i ../input -- ./../libspark/bpplus_hfuzz ___FILE___`
+4. To stop, press `ctrl+c`
+
+Here we are providing an empty corpus. If a corpus is already available, it can be provided instead.
+The `-i` flag specifies the input folder, and `./../<target>_hfuzz` is the target binary we want to fuzz.
+
+### Analyzing the crashes
+
+If there is a crash, the reason for the crash can be found in HONGGFUZZ.REPORT.TXT or simply by running the harness on the crashing input:
+```
+./libspark/<target>_hfuzz <crash_file>
+```
+
+Example:
+```
+./libspark/bpplus_hfuzz SIGABRT.PC.7ffff7a8400b.STACK.1b5b5f0067.CODE.-6.ADDR.0.INSTR.mov____0x108(%rsp),%rax
+```
+
+To debug or do root-cause analysis, the gdb debugger can be used:
+
+1. First compile the harness with the debug flags `-g -O0 -ggdb`. To build the debug binary, inside `src/fuzz` run:
+```
+make <target>_debug
+```
+Example:
+```
+make bpplus_debug
+```
+
+2. Start the debugger by running:
+```
+gdb --args <target>_debug <crash_file>
+```
+Example:
+```
+gdb --args bpplus_debug SIGABRT.PC.7ffff7a8400b.STACK.1b5b5f0067.CODE.-6.ADDR.0.INSTR.mov____0x108(%rsp),%rax
+```
+This will start the debugger.
+
+3. You can do heap analysis by running `heap-analysis` inside the debugger and/or `bt` for a backtrace.
+
+
+### Generating a Coverage Report using kcov
+* Install kcov (https://github.com/SimonKagstrom/kcov/tree/master)
+```
+sudo apt-get install binutils-dev libssl-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev
+git clone https://github.com/SimonKagstrom/kcov.git
+cd /path/to/kcov/source/dir
+mkdir build
+cd build
+cmake ..
+make
+sudo make install
+```
+Once kcov is successfully installed, follow the instructions below to generate the code coverage:
+
+1. First compile the harness with the debug flags: run `make <target>_debug` inside src/fuzz.
+2. Take input_folder as the input corpus from fuzzing, or create one by running `honggfuzz -i <input_folder> -- ./<target>_hfuzz ___FILE___`. This will start the fuzzer; kill it with `ctrl+c`. The fuzzer will generate some random inputs inside input_folder. Since kcov generates coverage for each input in input_folder, it is preferred to have only a few inputs, otherwise it will take a long time to generate the entire coverage.
+3. Inside `generate_coverage.sh`, replace input_folder, output_folder and fuzz_exe with your input corpus, coverage output folder and harness binary.
+4. Run `./generate_coverage.sh`. This will generate a merged output for all the inputs present in input_folder.
+5. To view the result, run `firefox ./merged-output/index.html`.
+6. Alternatively, or if you are on a VM, go inside the coverage output folder and then merged-output.
+7. Run `python3 -m http.server`. This will start an HTTP server at http://0.0.0.0:8000/.
+8. Open your browser and go to http://0.0.0.0:8000/ to see the result.
+
+NOTE: to view the coverage for every dependent file, `generate_coverage.sh` should be in the root folder. Also, you should either stop the previous server or start a new one on a different port by running `python3 -m http.server <port>` when viewing coverage for different files.
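For orientation, every harness in this patch follows the same honggfuzz/libFuzzer entry-point convention: it exports `LLVMFuzzerTestOneInput`, wraps the raw input bytes in a `FuzzedDataProvider`, derives typed values from them, and asserts an invariant of the component under test. The skeleton below is only an illustrative sketch of that pattern, not part of the patch itself; the file name and the trivial body are assumptions, and a real harness would exercise a specific libspark component.

```cpp
// Hypothetical skeleton for a new libspark harness (illustrative only, not part of this patch).
// It would live under src/fuzz/libspark/ and be built with hfuzz-clang++ like the other targets.
#include "../fuzzing_utilities.h"
#include "../FuzzedDataProvider.h"
#include <cstdint>
#include <string>

extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) {
    // Wrap the raw fuzzer input so it can be split into typed pieces deterministically.
    FuzzedDataProvider fdp(buf, len);
    FuzzedSecp256k1Object fsp(&fdp);

    // Derive test values from the input bytes.
    secp_primitives::Scalar s = fsp.GetScalar();
    secp_primitives::GroupElement g = fsp.GetMemberGroupElement();
    std::string data = fdp.ConsumeRemainingBytesAsString();

    // A real harness would exercise one libspark component here and assert its
    // invariants (e.g. that a serialize/deserialize or encrypt/decrypt round-trip
    // reproduces the original values), as the harnesses in this patch do.
    (void)s; (void)g; (void)data;

    return 0;
}
```

Wiring such a harness into the build would mean adding a corresponding `*_SRCS`/`*_OUTPUT` pair and a target to the Makefile above, following the existing entries.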
\ No newline at end of file diff --git a/src/fuzz/fuzzing_utilities.cpp b/src/fuzz/fuzzing_utilities.cpp new file mode 100644 index 0000000000..af9b1f2c65 --- /dev/null +++ b/src/fuzz/fuzzing_utilities.cpp @@ -0,0 +1,89 @@ +#include "fuzzing_utilities.h" + +FuzzedSecp256k1Object::FuzzedSecp256k1Object(FuzzedDataProvider *fdp) { + this->fdp = fdp; +} + +secp_primitives::GroupElement FuzzedSecp256k1Object::GetGroupElement() { + char* x = (char *)this->fdp->ConsumeBytes(256).data(); + char* y = (char *)this->fdp->ConsumeBytes(256).data(); + secp_primitives::GroupElement ge = secp_primitives::GroupElement(x, y); + + return ge; +} + +secp_primitives::Scalar FuzzedSecp256k1Object::GetScalar() { + uint64_t value = this->fdp->ConsumeIntegral(); + secp_primitives::Scalar s = secp_primitives::Scalar(value); + + return s; +} + +secp_primitives::GroupElement FuzzedSecp256k1Object::GetMemberGroupElement() { + secp_primitives::GroupElement ge; + ge.randomize(); + return ge; +} + +std::vector FuzzedSecp256k1Object::GetMemberGroupElements(size_t len) { + std::vector ge_vec; + ge_vec.resize(len); + for (size_t i = 0; i < len; i++) { + ge_vec[i] = (GetMemberGroupElement()); + } + return ge_vec; +} + +std::vector FuzzedSecp256k1Object::GetRandomGroupVector(size_t len) { + std::vector result; + result.resize(len); + for (size_t i = 0; i < len; i++) { + result[i].randomize(); + } + return result; +} + +std::vector FuzzedSecp256k1Object::GetGroupElements(int len) { + std::vector ge_vec; + ge_vec.reserve(len); + for (int i = 0; i < len; i++) { + ge_vec.push_back(GetGroupElement()); + } + + return ge_vec; +} + +std::vector FuzzedSecp256k1Object::GetScalars(size_t len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar()); + } + + return scalar_vec; +} + +std::vector FuzzedSecp256k1Object::GetScalarsVector(size_t len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar()); + } + + return scalar_vec; +} + +secp_primitives::Scalar FuzzedSecp256k1Object::GetScalar_modified() { + secp_primitives::Scalar s = secp_primitives::Scalar(this->fdp->ConsumeBytes(256).data()); + return s; +} + +std::vector FuzzedSecp256k1Object::GetScalars_modified(int len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar_modified()); + } + + return scalar_vec; +} \ No newline at end of file diff --git a/src/fuzz/fuzzing_utilities.h b/src/fuzz/fuzzing_utilities.h new file mode 100644 index 0000000000..360d27c7e3 --- /dev/null +++ b/src/fuzz/fuzzing_utilities.h @@ -0,0 +1,23 @@ +#include "FuzzedDataProvider.h" +#include "../secp256k1/include/Scalar.h" +#include "../secp256k1/include/GroupElement.h" + +class FuzzedSecp256k1Object { + public: + FuzzedSecp256k1Object(FuzzedDataProvider *fdp); + + FuzzedDataProvider *fdp; + + secp_primitives::GroupElement GetGroupElement(); + secp_primitives::Scalar GetScalar(); + secp_primitives::GroupElement GetMemberGroupElement(); + secp_primitives::Scalar GetScalar_modified(); + + std::vector GetGroupElements(int len); + std::vector GetScalars(size_t len); + std::vector GetMemberGroupElements(size_t len); + std::vector GetRandomGroupVector(size_t len); + std::vector GetScalars_modified(int len); + std::vector GetScalarsVector(size_t len); + +}; \ No newline at end of file diff --git a/src/fuzz/generate_coverage.sh b/src/fuzz/generate_coverage.sh new file mode 100755 index 0000000000..bd91f0aaa6 --- 
/dev/null +++ b/src/fuzz/generate_coverage.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +input_folder="../../src/fuzz/inputs/bpplus_inputs" +output_folder="../../src/fuzz/coverage_result/bpplus_coverage" +fuzz_exe="../../src/fuzz/libspark/bpplus_debug" + +mkdir $output_folder + +number_of_files=$(ls $input_folder | wc | awk '{print $1}') +echo "Number of input files to test: $number_of_files" + +count=0 + +for i in $(ls $input_folder); +do + kcov --include-path=. ./$output_folder/input_$count ./$fuzz_exe --stdout -d ./$input_folder/$i > /dev/null; + ((count++)); + echo "[++] Count of files processed: $count"; +done + +kcov --merge ./$output_folder/merged-output ./$output_folder/input_* \ No newline at end of file diff --git a/src/fuzz/libspark/aead_fuzz.cpp b/src/fuzz/libspark/aead_fuzz.cpp new file mode 100644 index 0000000000..b1b087dafb --- /dev/null +++ b/src/fuzz/libspark/aead_fuzz.cpp @@ -0,0 +1,24 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/aead.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + secp_primitives::GroupElement ge = fsp.GetGroupElement(); + std::string additional_data = fdp.ConsumeBytesAsString(len); + int fuzzed_message = fdp.ConsumeIntegral(); + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << fuzzed_message; + + spark::AEADEncryptedData aed = spark::AEAD::encrypt(ge, additional_data, ser); + ser = spark::AEAD::decrypt_and_verify(ge, additional_data, aed); + int received_fuzzed_message; + ser >> received_fuzzed_message; + assert(fuzzed_message == received_fuzzed_message); + + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/aead_fuzz_random_key.cpp b/src/fuzz/libspark/aead_fuzz_random_key.cpp new file mode 100644 index 0000000000..631f027dd7 --- /dev/null +++ b/src/fuzz/libspark/aead_fuzz_random_key.cpp @@ -0,0 +1,24 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/aead.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + secp_primitives::GroupElement ge = fsp.GetMemberGroupElement(); + std::string additional_data = fdp.ConsumeBytesAsString(len); + int fuzzed_message = fdp.ConsumeIntegral(); + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << fuzzed_message; + + spark::AEADEncryptedData aed = spark::AEAD::encrypt(ge, additional_data, ser); + ser = spark::AEAD::decrypt_and_verify(ge, additional_data, aed); + int received_fuzzed_message; + ser >> received_fuzzed_message; + assert(fuzzed_message == received_fuzzed_message); + + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/bech32_fuzz.cpp b/src/fuzz/libspark/bech32_fuzz.cpp new file mode 100644 index 0000000000..192b23b27d --- /dev/null +++ b/src/fuzz/libspark/bech32_fuzz.cpp @@ -0,0 +1,46 @@ +#include "../../libspark/bech32.h" +#include "../FuzzedDataProvider.h" +#include +#include + +enum class Bech32EncodingForFuzzing { + INVALID, + BECH32, + BECH32M, + kMaxValue = BECH32M +}; + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fuzzed_data(buf, len); + + std::string test_string = fuzzed_data.ConsumeBytesAsString(len); + std::vector test_vec = fuzzed_data.ConsumeBytes(len); + Bech32EncodingForFuzzing test_encoding_helper = fuzzed_data.ConsumeEnum(); + bech32::Encoding test_encoding; + switch 
(test_encoding_helper) { + case Bech32EncodingForFuzzing::INVALID: + test_encoding = bech32::Encoding::INVALID; + break; + case Bech32EncodingForFuzzing::BECH32: + test_encoding = bech32::Encoding::BECH32; + break; + case Bech32EncodingForFuzzing::BECH32M: + test_encoding = bech32::Encoding::BECH32M; + break; + } + std::string test_string_res; + test_string_res = bech32::encode(test_string, test_vec, test_encoding); + bech32::DecodeResult dr; + dr = bech32::decode(test_string_res); + assert(dr.hrp == test_string); + assert(dr.encoding == test_encoding); + assert(dr.data == test_vec); + + std::vector test_vec1 = fuzzed_data.ConsumeBytes(len); + std::vector test_vec2 = fuzzed_data.ConsumeBytes(len); + int test_frombits = fuzzed_data.ConsumeIntegral(); + int test_to_bits = fuzzed_data.ConsumeIntegral(); + bool test_pad = fuzzed_data.ConsumeBool(); + bech32::convertbits(test_vec1, test_vec2, test_frombits, test_to_bits, test_pad); + return 0; +} diff --git a/src/fuzz/libspark/bech32_fuzz_2.cpp b/src/fuzz/libspark/bech32_fuzz_2.cpp new file mode 100644 index 0000000000..bf71d3dd58 --- /dev/null +++ b/src/fuzz/libspark/bech32_fuzz_2.cpp @@ -0,0 +1,62 @@ +#include "../../libspark/bech32.h" +#include "../FuzzedDataProvider.h" +#include +#include +#include + +// enum class Bech32EncodingForFuzzing { +// INVALID, +// BECH32, +// BECH32M, +// kMaxValue = BECH32M +// }; + +bool CaseInsensitiveEqual(const std::string& s1, const std::string& s2) +{ + if (s1.size() != s2.size()) return false; + for (size_t i = 0; i < s1.size(); ++i) { + char c1 = s1[i]; + if (c1 >= 'A' && c1 <= 'Z') c1 -= ('A' - 'a'); + char c2 = s2[i]; + if (c2 >= 'A' && c2 <= 'Z') c2 -= ('A' - 'a'); + if (c1 != c2) return false; + } + return true; +} + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fuzzed_data(buf, len); + + std::string test_string = fuzzed_data.ConsumeBytesAsString(len); + + const auto r1 = bech32::decode(test_string); + if(r1.hrp.empty()) { + assert(r1.encoding == bech32::Encoding::INVALID); + assert(r1.data.empty()); + } else { + assert(r1.encoding != bech32::Encoding::INVALID); + const std::string reencoded = bech32::encode(r1.hrp, r1.data, r1.encoding); + assert(CaseInsensitiveEqual(test_string, reencoded)); + } + + std::vector input = fuzzed_data.ConsumeBytes(len); + std::vector test_vec2 = fuzzed_data.ConsumeBytes(len); + int test_frombits = fuzzed_data.ConsumeIntegral(); + int test_to_bits = fuzzed_data.ConsumeIntegral(); + bool test_pad = fuzzed_data.ConsumeBool(); + bech32::convertbits(input, test_vec2, test_frombits, test_to_bits, test_pad); + + if(input.size() + 3 + 6 <= 90) { + for (auto encoding: {bech32::Encoding::BECH32, bech32::Encoding::BECH32M}) { + const std::string encoded = bech32::encode("bc", input, encoding ); + assert(!encoded.empty()); + + const auto r2 = bech32::decode(encoded); + assert(r2.encoding == encoding); + assert(r2.hrp == "bc"); + assert(r2.data == input); + } + } + + return 0; +} diff --git a/src/fuzz/libspark/bpplus_fuzz.cpp b/src/fuzz/libspark/bpplus_fuzz.cpp new file mode 100644 index 0000000000..82f5504572 --- /dev/null +++ b/src/fuzz/libspark/bpplus_fuzz.cpp @@ -0,0 +1,112 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/bpplus.h" +#include "../../libspark/bpplus_proof.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Single Proof **/ + size_t N0 = 
fdp.ConsumeIntegralInRange(0,64); + size_t M0 = fdp.ConsumeIntegral(); + + N0 = 64; + M0 = 4; + // Generators + GroupElement G0, H0; + G0.randomize(); + H0.randomize(); + + std::vector Gi0, Hi0; + size_t generators_needed = N0*M0; + if (!spark::is_nonzero_power_of_2(generators_needed)) { + generators_needed = 1 << (spark::log2(N0*M0) + 1); + } + + Gi0.resize(generators_needed); + Hi0.resize(generators_needed); + for (size_t i=0; i < generators_needed; i++) { + Gi0[i].randomize(); + Hi0[i].randomize(); + } + + // Commitments + std::vector v, r; + v.resize(M0); + r.resize(M0); + // v = fsp.GetScalars(M0); + // r = fsp.GetScalars(M0); + for(int i = 0; i < M0; i++){ + v[i] = Scalar((uint64_t) rand()); + r[i].randomize(); + } + + std::vector C0; + C0.resize(M0); + for (size_t i=0; i < M0; i++) { + C0[i] = G0*v[i] + H0*r[i]; + } + + spark::BPPlus bpplus0(G0, H0, Gi0, Hi0, N0); + spark::BPPlusProof proof0; + bpplus0.prove(v, r, C0, proof0); + assert(bpplus0.verify(C0, proof0)); + /** End of Single proof fuzz test**/ + + /** Batch Proof **/ + + size_t N1 = fdp.ConsumeIntegralInRange(1,64); + size_t B = fdp.ConsumeIntegral(); + N1 = 64; + B = 5; + + std::vector sizes; + sizes.resize(B); + for(int i = 0; i < B; i++){ + sizes[i] = (fdp.ConsumeIntegral() % 8) + 1 ; // otherwise it's "Bad BPPlus statement!4" line 102 bpplus.cpp since B = 5.(checked) + } + // sizes = fdp.ConsumeRemainingBytes(); + + // Generators + GroupElement G1, H1; + G1.randomize(); + H1.randomize(); + + // std::size_t next_power = 1 << (uint(log2(B)) + 1); + std::vector Gi1, Hi1; + Gi1.resize(8*N1); + Hi1.resize(8*N1); + for (size_t i=0; i < 8*N1; i++) { + Hi1[i].randomize(); + Gi1[i].randomize(); + } + + spark::BPPlus bpplus1(G1, H1, Gi1, Hi1, N1); + std::vector proofs; + proofs.resize(B); + std::vector> C1; + + for (size_t i=0; i < B; i++) { + std::size_t M = sizes[i]; + std::vector v, r; + v.resize(M); + r.resize(M); + std::vector C_; + C_.resize(M); + for (size_t j=0; j < M; j++) { + v[j] = Scalar(uint64_t(j)); + r[j].randomize(); + C_[j] = G1*v[j] + H1*r[j]; + } + C1.emplace_back(C_); + bpplus1.prove(v, r, C_, proofs[i]); + } + assert(bpplus1.verify(C1, proofs)); + + /** End of Batch proof fuzz test **/ + + return 0; +} diff --git a/src/fuzz/libspark/chaum_fuzz.cpp b/src/fuzz/libspark/chaum_fuzz.cpp new file mode 100644 index 0000000000..e25a9a8b00 --- /dev/null +++ b/src/fuzz/libspark/chaum_fuzz.cpp @@ -0,0 +1,229 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/chaum_proof.h" +#include "../../libspark/chaum.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Serialization tests **/ + GroupElement F0, G0, H0, U0; + F0.randomize(); + G0.randomize(); + H0.randomize(); + U0.randomize(); + + const std::size_t n = fdp.ConsumeIntegral(); + + Scalar mu0; + mu0.randomize(); + std::vector x0, y0, z0; + x0.resize(n); + y0.resize(n); + z0.resize(n); + std::vector S0, T0; + S0.resize(n); + T0.resize(n); + for (size_t i=0; i < n; i++) { + x0[i].randomize(); + y0[i].randomize(); + z0[i].randomize(); + + S0[i] = F0*x0[i] + G0*y0[i] + H0*z0[i]; + T0[i] = (U0 + G0*y0[i].negate())*x0[i].inverse(); + } + + spark::ChaumProof proof0; + + spark::Chaum chaum0(F0, G0, H0, U0); + chaum0.prove(mu0, x0, y0, z0, S0, T0, proof0); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof0; + + spark::ChaumProof deserialized_proof0; + serialized >> deserialized_proof0; + + 
assert(proof0.A1 == deserialized_proof0.A1); + assert(proof0.t2 == deserialized_proof0.t2); + assert(proof0.t3 == deserialized_proof0.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof0.A2[i] == deserialized_proof0.A2[i]); + assert(proof0.t1[i] == deserialized_proof0.t1[i]); + } + + /** Now fuzz all the things **/ + + GroupElement F1, G1, H1, U1; + F1 = fsp.GetMemberGroupElement(); + G1 = fsp.GetMemberGroupElement(); + H1 = fsp.GetMemberGroupElement(); + U1 = fsp.GetMemberGroupElement(); + //F1.randomize(); + //G1.randomize(); + //H1.randomize(); + //U1.randomize(); + + Scalar mu1; + mu1 = fsp.GetScalar(); + std::vector x1, y1, z1; + x1.resize(n); + x1 = fsp.GetScalars(n); + y1.resize(n); + y1 = fsp.GetScalars(n); + z1.resize(n); + z1 = fsp.GetScalars(n); + + std::vector S1, T1; + S1.resize(n); + T1.resize(n); + for (size_t i=0; i < n; i++) { + S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i]; + T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse(); + } + + spark::ChaumProof proof1; + + spark::Chaum chaum1(F1, G1, H1, U1); + chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1); + + serialized << proof1; + + spark::ChaumProof deserialized_proof1; + serialized >> deserialized_proof1; + + assert(proof1.A1 == deserialized_proof1.A1); + assert(proof1.t2 == deserialized_proof1.t2); + assert(proof1.t3 == deserialized_proof1.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof1.A2[i] == deserialized_proof1.A2[i]); + assert(proof1.t1[i] == deserialized_proof1.t1[i]); + } + /**End of serialization tests**/ + + /** Completeness tests **/ + + GroupElement F2, G2, H2, U2; + F2.randomize(); + G2.randomize(); + H2.randomize(); + U2.randomize(); + + Scalar mu2; + mu2.randomize(); + std::vector x2, y2, z2; + x2.resize(n); + y2.resize(n); + z2.resize(n); + std::vector S2, T2; + S2.resize(n); + T2.resize(n); + for (size_t i=0; i < n; i++) { + x2[i].randomize(); + y2[i].randomize(); + z2[i].randomize(); + + S2[i] = F2*x2[i] + G2*y2[i] + H2*z2[i]; + T2[i] = (U2 + G2*y2[i].negate())*x2[i].inverse(); + } + + spark::ChaumProof proof2; + + spark::Chaum chaum2(F2, G2, H2, U2); + chaum2.prove(mu2, x2, y2, z2, S2, T2, proof2); + assert(chaum2.verify(mu2, S2, T2, proof2)); + + /** Full all the things again**/ + + GroupElement F3, G3, H3, U3; + F3 = fsp.GetMemberGroupElement(); + G3 = fsp.GetMemberGroupElement(); + H3 = fsp.GetMemberGroupElement(); + U3 = fsp.GetMemberGroupElement(); + //F3.randomize(); + //G3.randomize(); + //H3.randomize(); + //U3.randomize(); + + + Scalar mu3; + mu3 = fsp.GetScalar(); + std::vector x3, y3, z3; + x3.resize(n); + x3 = fsp.GetScalars(n); + y3.resize(n); + y3 = fsp.GetScalars(n); + z3.resize(n); + z3 = fsp.GetScalars(n); + + std::vector S3, T3; + S3.resize(n); + T3.resize(n); + for (size_t i=0; i < n; i++) { + S3[i] = F3*x3[i] + G3*y3[i] + H3*z3[i]; + T3[i] = (U3 + G3*y3[i].negate())*x3[i].inverse(); + } + + spark::ChaumProof proof3; + + spark::Chaum chaum3(F3, G3, H3, U3); + chaum3.prove(mu3, x3, y3, z3, S3, T3, proof3); + assert(chaum3.verify(mu3, S3, T3, proof3)); + + /** End of completeness tests**/ + + /* Fuzzing for bad proofs*/ + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + assert(!(chaum3.verify(evil_mu, S3, T3, proof3))); + + // Bad S + for (std::size_t i = 0; i < n; i++) { + std::vector evil_S(S3); + evil_S[i].randomize(); + assert(!(chaum3.verify(mu3, evil_S, T3, proof3))); + } + + // Bad T + for (std::size_t i = 0; i < n; i++) { + std::vector evil_T(T3); + evil_T[i].randomize(); + assert(!(chaum3.verify(mu3, S3, evil_T, proof3))); + } + + // Bad A1 + spark::ChaumProof evil_proof 
= proof3; + evil_proof.A1.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad A2 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.A2[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t1 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.t1[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t2 + evil_proof = proof3; + evil_proof.t2.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad t3 + evil_proof = proof3; + evil_proof.t3.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/chaum_fuzz_2.cpp b/src/fuzz/libspark/chaum_fuzz_2.cpp new file mode 100644 index 0000000000..9015c93b76 --- /dev/null +++ b/src/fuzz/libspark/chaum_fuzz_2.cpp @@ -0,0 +1,145 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/chaum_proof.h" +#include "../../libspark/chaum.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + + if (len == 0) { + return 0; + } + + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + GroupElement F1, G1, H1, U1; + std::vector ge = fsp.GetGroupElements(4); + + F1 = ge[0]; + G1 = ge[1]; + H1 = ge[2]; + U1 = ge[3]; + + const std::size_t n = fdp.ConsumeIntegral(); + + Scalar mu1; + mu1 = fsp.GetScalar(); + std::vector x1, y1, z1; + x1.resize(n); + x1 = fsp.GetScalars(n); + y1.resize(n); + y1 = fsp.GetScalars(n); + z1.resize(n); + z1 = fsp.GetScalars(n); + + std::vector S1, T1; + S1.resize(n); + T1.resize(n); + for (size_t i=0; i < n; i++) { + S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i]; + T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse(); + } + + spark::ChaumProof proof1; + + spark::Chaum chaum1(F1, G1, H1, U1); + chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof1; + + spark::ChaumProof deserialized_proof1; + serialized >> deserialized_proof1; + + assert(proof1.A1 == deserialized_proof1.A1); + assert(proof1.t2 == deserialized_proof1.t2); + assert(proof1.t3 == deserialized_proof1.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof1.A2[i] == deserialized_proof1.A2[i]); + assert(proof1.t1[i] == deserialized_proof1.t1[i]); + } + + GroupElement F3, G3, H3, U3; + F3 = fsp.GetGroupElement(); + G3 = fsp.GetGroupElement(); + H3 = fsp.GetGroupElement(); + U3 = fsp.GetGroupElement(); + + Scalar mu3; + mu3 = fsp.GetScalar(); + std::vector x3, y3, z3; + x3.resize(n); + x3 = fsp.GetScalars(n); + y3.resize(n); + y3 = fsp.GetScalars(n); + z3.resize(n); + z3 = fsp.GetScalars(n); + + std::vector S3, T3; + S3.resize(n); + T3.resize(n); + for (size_t i=0; i < n; i++) { + S3[i] = F3*x3[i] + G3*y3[i] + H3*z3[i]; + T3[i] = (U3 + G3*y3[i].negate())*x3[i].inverse(); + } + + spark::ChaumProof proof3; + + spark::Chaum chaum3(F3, G3, H3, U3); + chaum3.prove(mu3, x3, y3, z3, S3, T3, proof3); + assert(chaum3.verify(mu3, S3, T3, proof3)); + + /* Fuzzing for bad proofs*/ + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + assert(!(chaum3.verify(evil_mu, S3, T3, proof3))); + + // Bad S + for (std::size_t i = 0; i < n; i++) { + std::vector evil_S(S3); + evil_S[i].randomize(); + assert(!(chaum3.verify(mu3, evil_S, T3, proof3))); + } + + // Bad T + for (std::size_t i = 0; i < n; i++) { + std::vector evil_T(T3); + evil_T[i].randomize(); + assert(!(chaum3.verify(mu3, S3, evil_T, proof3))); + } + + 
// Bad A1
+    spark::ChaumProof evil_proof = proof3;
+    evil_proof.A1.randomize();
+    assert(!(chaum3.verify(mu3, S3, T3, evil_proof)));
+
+    // Bad A2
+    for (std::size_t i = 0; i < n; i++) {
+        evil_proof = proof3;
+        evil_proof.A2[i].randomize();
+        assert(!(chaum3.verify(mu3, S3, T3, evil_proof)));
+    }
+
+    // Bad t1
+    for (std::size_t i = 0; i < n; i++) {
+        evil_proof = proof3;
+        evil_proof.t1[i].randomize();
+        assert(!(chaum3.verify(mu3, S3, T3, evil_proof)));
+    }
+
+    // Bad t2
+    evil_proof = proof3;
+    evil_proof.t2.randomize();
+    assert(!(chaum3.verify(mu3, S3, T3, evil_proof)));
+
+    // Bad t3
+    evil_proof = proof3;
+    evil_proof.t3.randomize();
+    assert(!(chaum3.verify(mu3, S3, T3, evil_proof)));
+
+    return 0;
+
+}
\ No newline at end of file
diff --git a/src/fuzz/libspark/chaum_fuzz_member.cpp b/src/fuzz/libspark/chaum_fuzz_member.cpp
new file mode 100644
index 0000000000..f8625545e3
--- /dev/null
+++ b/src/fuzz/libspark/chaum_fuzz_member.cpp
@@ -0,0 +1,143 @@
+#include "../fuzzing_utilities.h"
+#include "../FuzzedDataProvider.h"
+#include "../../libspark/chaum_proof.h"
+#include "../../libspark/chaum.h"
+#include <cassert>
+
+extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) {
+    FuzzedDataProvider fdp(buf, len);
+    FuzzedSecp256k1Object fsp(&fdp);
+
+    /** Serialization tests **/
+    GroupElement F0, G0, H0, U0;
+    F0.randomize();
+    G0.randomize();
+    H0.randomize();
+    U0.randomize();
+
+    const std::size_t n = fdp.ConsumeIntegralInRange(1, INT_MAX);
+
+    Scalar mu0;
+    mu0.randomize();
+    std::vector<Scalar> x0, y0, z0;
+    x0.resize(n);
+    y0.resize(n);
+    z0.resize(n);
+    std::vector<GroupElement> S0, T0;
+    S0.resize(n);
+    T0.resize(n);
+    for (size_t i=0; i < n; i++) {
+        x0[i].randomize();
+        y0[i].randomize();
+        z0[i].randomize();
+
+        S0[i] = F0*x0[i] + G0*y0[i] + H0*z0[i];
+        T0[i] = (U0 + G0*y0[i].negate())*x0[i].inverse();
+    }
+
+    spark::ChaumProof proof0;
+
+    spark::Chaum chaum0(F0, G0, H0, U0);
+    chaum0.prove(mu0, x0, y0, z0, S0, T0, proof0);
+
+    CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION);
+    serialized << proof0;
+
+    spark::ChaumProof deserialized_proof0;
+    serialized >> deserialized_proof0;
+
+    assert(proof0.A1 == deserialized_proof0.A1);
+    assert(proof0.t2 == deserialized_proof0.t2);
+    assert(proof0.t3 == deserialized_proof0.t3);
+    for (size_t i = 0 ; i < n; i++) {
+        assert(proof0.A2[i] == deserialized_proof0.A2[i]);
+        assert(proof0.t1[i] == deserialized_proof0.t1[i]);
+    }
+
+    // fuzz completeness
+    GroupElement F1, G1, H1, U1;
+    F1.randomize();
+    G1.randomize();
+    H1.randomize();
+    U1.randomize();
+
+    const std::size_t n1 = fdp.ConsumeIntegralInRange(1, INT_MAX);
+
+    Scalar mu1;
+    mu1.randomize();
+    std::vector<Scalar> x1, y1, z1;
+    x1.resize(n1);
+    y1.resize(n1);
+    z1.resize(n1);
+    std::vector<GroupElement> S1, T1;
+    S1.resize(n1);
+    T1.resize(n1);
+    for (std::size_t i = 0; i < n1; i++) {
+        x1[i].randomize();
+        y1[i].randomize();
+        z1[i].randomize();
+
+        S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i];
+        T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse();
+    }
+
+    spark::ChaumProof proof1;
+    spark::Chaum chaum1(F1, G1, H1, U1);
+    chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1);
+
+    assert(chaum1.verify(mu1, S1, T1, proof1));
+    /** End of completeness tests**/
+
+    /* Fuzzing for bad proofs*/
+
+    // Bad mu
+    Scalar evil_mu;
+    evil_mu.randomize();
+    assert(!(chaum1.verify(evil_mu, S1, T1, proof1)));
+
+    // Bad S
+    for (std::size_t i = 0; i < n1; i++) {
+        std::vector<GroupElement> evil_S(S1);
+        evil_S[i].randomize();
+        assert(!(chaum1.verify(mu1, evil_S, T1, proof1)));
+    }
+
+    // Bad T
+    for (std::size_t i = 0; i < n1; i++) {
+        std::vector<GroupElement> evil_T(T1);
+        evil_T[i].randomize();
+        assert(!(chaum1.verify(mu1, S1, evil_T, proof1)));
+    }
+
+    // Bad A1
+    spark::ChaumProof evil_proof = proof1;
+    evil_proof.A1.randomize();
+    assert(!(chaum1.verify(mu1, S1, T1, evil_proof)));
+
+    // Bad A2
+    for (std::size_t i = 0; i < n1; i++) {
+        evil_proof = proof1;
+        evil_proof.A2[i].randomize();
+        assert(!(chaum1.verify(mu1, S1, T1, evil_proof)));
+    }
+
+    // Bad t1
+    for (std::size_t i = 0; i < n1; i++) {
+        evil_proof = proof1;
+        evil_proof.t1[i].randomize();
+        assert(!(chaum1.verify(mu1, S1, T1, evil_proof)));
+    }
+
+    // Bad t2
+    evil_proof = proof1;
+    evil_proof.t2.randomize();
+    assert(!(chaum1.verify(mu1, S1, T1, evil_proof)));
+
+    // Bad t3
+    evil_proof = proof1;
+    evil_proof.t3.randomize();
+    assert(!(chaum1.verify(mu1, S1, T1, evil_proof)));
+
+    return 0;
+
+}
\ No newline at end of file
diff --git a/src/fuzz/libspark/coin_fuzz.cpp b/src/fuzz/libspark/coin_fuzz.cpp
new file mode 100644
index 0000000000..79b0a6f0b4
--- /dev/null
+++ b/src/fuzz/libspark/coin_fuzz.cpp
@@ -0,0 +1,72 @@
+#include "../fuzzing_utilities.h"
+#include "../FuzzedDataProvider.h"
+#include "../../libspark/coin.h"
+// #include "../../test/test_bitcoin.h"
+
+#include <cassert>
+
+const std::size_t SCALAR_ENCODING = 32;
+const char COIN_TYPE_MINT = 0;
+const char COIN_TYPE_SPEND = 1;
+
+
+extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) {
+    FuzzedDataProvider fdp(buf, len);
+    FuzzedSecp256k1Object fsp(&fdp);
+
+    // Scalar temp = fsp.GetScalar();
+    Scalar temp;
+    temp.randomize();
+
+    std::vector<unsigned char> result;
+    result.resize(SCALAR_ENCODING);
+    temp.serialize(result.data());
+
+    const spark::Params* params;
+    params = spark::Params::get_default();
+
+    const uint64_t i = len;
+
+    // it would be better to choose a different way to generate the value
+    const uint64_t v = std::rand();
+    const std::string memo = fdp.ConsumeBytesAsString(len);
+
+    // Generate keys
+    spark::SpendKey spend_key(params);
+    spark::FullViewKey full_view_key(spend_key);
+    spark::IncomingViewKey incoming_view_key(full_view_key);
+
+    // Generate address
+    spark::Address address(incoming_view_key, i);
+
+    // Generate coin
+    // Scalar k = fsp.GetScalar();
+    Scalar k;
+    k.randomize();
+
+    spark::Coin coin = spark::Coin (
+        params,
+        COIN_TYPE_MINT,
+        k,
+        address,
+        v,
+        memo,
+        result
+    );
+
+    // Identify coin
+    spark::IdentifiedCoinData i_data = coin.identify(incoming_view_key);
+    assert(i_data.i == i);
+    assert(i_data.d == address.get_d());
+    assert(i_data.v == v);
+    assert(i_data.memo == memo);
+
+    // Recover coin
+    spark::RecoveredCoinData r_data = coin.recover(full_view_key, i_data);
+    assert(params->get_F()*(spark::SparkUtils::hash_ser(k, coin.serial_context) + spark::SparkUtils::hash_Q2(incoming_view_key.get_s1(), i) + full_view_key.get_s2()) + full_view_key.get_D() == params->get_F()*r_data.s + full_view_key.get_D());
+
+    assert(r_data.T * r_data.s + full_view_key.get_D() == params->get_U());
+
+    return 0;
+
+}
\ No newline at end of file
diff --git a/src/fuzz/libspark/f4grumble_fuzz.cpp b/src/fuzz/libspark/f4grumble_fuzz.cpp
new file mode 100644
index 0000000000..9d0f9b8d43
--- /dev/null
+++ b/src/fuzz/libspark/f4grumble_fuzz.cpp
@@ -0,0 +1,62 @@
+#include "../../libspark/f4grumble.h"
+#include <cassert>
+
+extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) {
+    std::string test_string = std::string((char *) buf, len);
+    std::vector<unsigned char> test_char_vec;
+    test_char_vec.reserve(len);
+
+    for (int i=0; i < len; i++) {
+        test_char_vec.push_back(test_string[i]);
+    }
+
+    // too_long_size
+    bool exception_thrown_size = false;
+    bool
exception_thrown_encode = false; + bool exception_thrown_decode = false; + + if(len > spark::F4Grumble::get_max_size()){ + + try { + spark::F4Grumble grumble(test_string[0], len); + } catch(const std::exception& ) { + exception_thrown_size = true; + } + assert(exception_thrown_size); + + spark::F4Grumble grumble = spark::F4Grumble(test_string[0], len); + + try { + grumble.encode(test_char_vec); + } catch (const std::exception& ) { + exception_thrown_encode = true; + } + + assert(exception_thrown_encode); + try { + grumble.decode(test_char_vec); + } catch (const std::exception& ) { + exception_thrown_decode = true; + } + assert(exception_thrown_decode); + return 0; + } + + spark::F4Grumble f4grumble_fuzz = spark::F4Grumble(test_string[0], len); + std::vector scrambled = f4grumble_fuzz.encode(test_char_vec); + std::vector unscrambled = f4grumble_fuzz.decode(scrambled); + + assert(scrambled.size() == test_char_vec.size()); + assert(unscrambled == test_char_vec); + + // bad_network + unsigned char evil_network = ~test_string[0]; + assert(test_string[0] != evil_network); + + spark::F4Grumble evil_grumble(evil_network, len); + //decoding with a different network + std::vector evil_unscrambled = evil_grumble.decode(scrambled); + assert(evil_unscrambled.size() == scrambled.size()); + assert(evil_unscrambled != test_char_vec); + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/grootle_fuzz.cpp b/src/fuzz/libspark/grootle_fuzz.cpp new file mode 100644 index 0000000000..de51fb0043 --- /dev/null +++ b/src/fuzz/libspark/grootle_fuzz.cpp @@ -0,0 +1,89 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/grootle.h" +#include "../../libspark/grootle_proof.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + size_t n = fdp.ConsumeIntegral(); + size_t m = fdp.ConsumeIntegral(); + size_t N = (size_t) std::pow(n, m); + + GroupElement H; + std::vector Gi = fsp.GetGroupElements(n*m); + std::vector Hi = fsp.GetGroupElements(n*m); + + size_t commit_size = fdp.ConsumeIntegral(); + std::vector S = fsp.GetGroupElements(commit_size); + std::vector V = fsp.GetGroupElements(commit_size); + + std::vector indexes = fdp.ConsumeBytes(len); + std::vector sizes; + sizes.resize(len); + for(size_t i=0; i < len; i++) { + sizes[i] = fdp.ConsumeIntegral(); + } + std::vector S1, V1; + std::vector> roots; + std::vector s, v; + for (std::size_t index : indexes) { + Scalar s_, v_; + s_ = fsp.GetScalar(); + v_ = fsp.GetScalar(); + s.emplace_back(s_); + v.emplace_back(v_); + + S1.emplace_back(S[index]); + V1.emplace_back(V[index]); + + S[index] += H*s_; + V[index] += H*v_; + + Scalar temp; + temp = fsp.GetScalar(); + std::vector root; + root.reserve(spark::SCALAR_ENCODING); + temp.serialize(root.data()); + roots.emplace_back(root); + } + + spark::Grootle grootle(H, Hi, Hi, n, m); + std::vector proofs; + + for (size_t i=0; i < indexes.size(); i++) { + proofs.emplace_back(); + std::vector S_(S.begin() + commit_size - sizes[i], S.end()); + std::vector V_(V.begin() + commit_size - sizes[i], V.end()); + + grootle.prove( + indexes[i] - (commit_size - sizes[i]), + s[i], + S_, + S1[i], + v[i], + V_, + V1[i], + roots[i], + proofs.back() + + ); + + assert(grootle.verify(S, S1[i], V, V1[i], roots[i], sizes[i], proofs.back())); + } + + assert(grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + // Add an invalid proof + proofs.emplace_back(proofs.back()); + 
S1.emplace_back(S1.back()); + V1.emplace_back(V1.back()); + S1.back().randomize(); + sizes.emplace_back(sizes.back()); + assert(!grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/grootle_fuzz_member.cpp b/src/fuzz/libspark/grootle_fuzz_member.cpp new file mode 100644 index 0000000000..2f0b57fb0e --- /dev/null +++ b/src/fuzz/libspark/grootle_fuzz_member.cpp @@ -0,0 +1,90 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/grootle.h" +#include "../../libspark/grootle_proof.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + std::size_t n = fdp.ConsumeIntegralInRange(2, 65535); + std::size_t m = fdp.ConsumeIntegralInRange(2, 65535); + std::size_t N = (size_t) std::pow(n,m); + + GroupElement H; + H.randomize(); + std::vector Gi = fsp.GetRandomGroupVector(n*m); + std::vector Hi = fsp.GetRandomGroupVector(n*m); + + size_t commit_size = fdp.ConsumeIntegralInRange(1, N); + std::vector S = fsp.GetRandomGroupVector(commit_size); + std::vector V = fsp.GetRandomGroupVector(commit_size); + + std::vector indexes = fdp.ConsumeBytes(N); + std::vector sizes; + sizes.resize(N); + for(size_t i=0; i < N; i++) { + sizes[i] = fdp.ConsumeIntegralInRange(0, N); + } + std::vector S1, V1; + std::vector> roots; + std::vector s, v; + for (std::size_t index : indexes) { + Scalar s_, v_; + s_ = fsp.GetScalar(); + v_ = fsp.GetScalar(); + s.emplace_back(s_); + v.emplace_back(v_); + + S1.emplace_back(S[index]); + V1.emplace_back(V[index]); + + S[index] += H*s_; + V[index] += H*v_; + + Scalar temp; + temp = fsp.GetScalar(); + std::vector root; + root.reserve(spark::SCALAR_ENCODING); + temp.serialize(root.data()); + roots.emplace_back(root); + } + + spark::Grootle grootle(H, Gi, Hi, n, m); + std::vector proofs; + + for (size_t i=0; i < indexes.size(); i++) { + proofs.emplace_back(); + std::vector S_(S.begin() + commit_size - sizes[i], S.end()); + std::vector V_(V.begin() + commit_size - sizes[i], V.end()); + + grootle.prove( + indexes[i] - (commit_size - sizes[i]), + s[i], + S_, + S1[i], + v[i], + V_, + V1[i], + roots[i], + proofs.back() + + ); + + assert(grootle.verify(S, S1[i], V, V1[i], roots[i], sizes[i], proofs.back())); + } + + assert(grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + // Add an invalid proof + proofs.emplace_back(proofs.back()); + S1.emplace_back(S1.back()); + V1.emplace_back(V1.back()); + S1.back().randomize(); + sizes.emplace_back(sizes.back()); + assert(!grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/mint_transaction_fuzz.cpp b/src/fuzz/libspark/mint_transaction_fuzz.cpp new file mode 100644 index 0000000000..8e10795667 --- /dev/null +++ b/src/fuzz/libspark/mint_transaction_fuzz.cpp @@ -0,0 +1,34 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/mint_transaction.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + const spark::Params* params; + params = spark::Params::get_default(); + const size_t t = fdp.ConsumeIntegral(); + + spark::SpendKey spend_key(params); + spark::FullViewKey full_view_key(spend_key); + spark::IncomingViewKey incoming_view_key(full_view_key); + + std::vector outputs; + + for (size_t i = 0; i < t; i++) { + 
spark::MintedCoinData output; + output.address = spark::Address(incoming_view_key, fdp.ConsumeIntegral()); + output.v = fdp.ConsumeIntegral(); + output.memo = fdp.ConsumeBytesAsString(len); + outputs.emplace_back(output); + } + + spark::MintTransaction mint(params, outputs, fdp.ConsumeBytes(spark::SCALAR_ENCODING)); + assert(mint.verify()); + + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/schnorr_fuzz.cpp b/src/fuzz/libspark/schnorr_fuzz.cpp new file mode 100644 index 0000000000..735c68d67f --- /dev/null +++ b/src/fuzz/libspark/schnorr_fuzz.cpp @@ -0,0 +1,95 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/schnorr_proof.h" +#include "../../libspark/schnorr.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Serialization and Completeness tests **/ + GroupElement G0; + // G0 = fsp.GetGroupElement(); + + // NOTE: all GetGroupElement() is replaced by GetMemberGroupElement() + + // ensure that G0 is valid group element + // thus the crash of valid fieldElement and groupElement will not occur + G0.generate(buf); + + Scalar y0; + y0 = fsp.GetScalar(); + GroupElement Y0 = G0*y0; + + spark::SchnorrProof proof0; + + spark::Schnorr schnorr0(G0); + schnorr0.prove(y0, Y0, proof0); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof0; + + spark::SchnorrProof deserialized_proof0; + serialized >> deserialized_proof0; + + assert(proof0.A == deserialized_proof0.A); + assert(proof0.t == deserialized_proof0.t); + assert(schnorr0.verify(Y0, proof0)); + + /** End of serialization and completeness tests **/ + + /** Aggregation test **/ + + size_t n = fdp.ConsumeIntegral(); + + GroupElement G1; + G1 = fsp.GetMemberGroupElement(); + std::vector y1; + std::vector Y1; + + for(size_t i=0; i < n; i++) { + y1.emplace_back(); + y1.back() = fsp.GetScalar(); + + Y1.emplace_back(G1 * y1.back()); + } + + spark::SchnorrProof proof1; + spark::Schnorr schnorr1(G1); + schnorr1.prove(y1, Y1, proof1); + assert(schnorr1.verify(Y1, proof1)); + + /** End of aggregation test **/ + + /* + fuzzing bad proofs + */ + + // Bad Y + GroupElement evil_Y; + evil_Y.randomize(); + assert(!(schnorr1.verify(evil_Y, proof1))); + + // Bad A + spark::SchnorrProof evil_proof = proof1; + evil_proof.A.randomize(); + assert(!(schnorr1.verify(Y1, evil_proof))); + + // Bad t + evil_proof = proof1; + evil_proof.t.randomize(); + assert(!(schnorr1.verify(Y1, evil_proof))); + + // //checking empty proof + // std::vector y3; + // std::vector Y3; + // y3.resize(0); + // Y3.resize(0); + // spark::SchnorrProof proof3; + + // spark::Schnorr schnorr3(G1); + // schnorr3.prove(y3, Y3, proof3); + // assert(schnorr1.verify(Y3, proof3)); + +} \ No newline at end of file diff --git a/src/fuzz/libspark/spend_transaction_fuzz.cpp b/src/fuzz/libspark/spend_transaction_fuzz.cpp new file mode 100644 index 0000000000..14461e70b7 --- /dev/null +++ b/src/fuzz/libspark/spend_transaction_fuzz.cpp @@ -0,0 +1,110 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/spend_transaction.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + const spark::Params* params; + params = spark::Params::get_default(); + const std::string memo = fdp.ConsumeBytesAsString(len); + + spark::SpendKey spend_key(params); + 
spark::FullViewKey full_view_key(spend_key); + spark::IncomingViewKey incoming_view_key(full_view_key); + + spark::Address address(incoming_view_key, fdp.ConsumeIntegral()); + + size_t N = (size_t) pow(params->get_n_grootle(), params->get_m_grootle()); + + bool exception_thrown = false; + if (memo.size() > params->get_memo_bytes()) { + try{ + Scalar k; + k.randomize(); + uint64_t v = rand(); + spark::Coin(params, spark::COIN_TYPE_MINT, k, address, v, memo, fdp.ConsumeBytes(spark::SCALAR_ENCODING)); + } catch(const std::exception& ) { + exception_thrown = true; + } + assert(exception_thrown); + return 0; + } + + std::vector in_coins; + for (size_t i = 0; i < N; i ++) { + secp_primitives::Scalar k = fsp.GetScalar(); + + uint64_t v = fdp.ConsumeIntegral(); + + in_coins.emplace_back(spark::Coin(params, spark::COIN_TYPE_MINT, k, address, v, memo, fdp.ConsumeBytes(spark::SCALAR_ENCODING))); + } + + uint64_t f = 0; + + std::vector spend_indices = fdp.ConsumeBytes(len); + if (spend_indices.size() < len) { + for (int i = spend_indices.size(); i < len; i++) { + spend_indices.push_back(std::rand()); + } + } + std::vector spend_coin_data; + std::unordered_map cover_set_data; + const size_t w = spend_indices.size(); + for (size_t u = 0; u < w; u++) { + spark::IdentifiedCoinData identified_coin_data = in_coins[spend_indices[u]].identify(incoming_view_key); + spark::RecoveredCoinData recovered_coin_data = in_coins[spend_indices[u]].recover(full_view_key, identified_coin_data); + + spend_coin_data.emplace_back(); + uint64_t cover_set_id = fdp.ConsumeIntegral(); + spend_coin_data.back().cover_set_id = cover_set_id; + + spark::CoverSetData set_data; + set_data.cover_set = in_coins; + set_data.cover_set_representation = fdp.ConsumeBytes(spark::SCALAR_ENCODING); + cover_set_data[cover_set_id] = set_data; + spend_coin_data.back().index = spend_indices[u]; + spend_coin_data.back().k = identified_coin_data.k; + spend_coin_data.back().s = recovered_coin_data.s; + spend_coin_data.back().T = recovered_coin_data.T; + spend_coin_data.back().v = identified_coin_data.v; + + f += identified_coin_data.v; + } + + const size_t t = fdp.ConsumeIntegral(); + std::vector out_coin_data; + for (size_t j = 0; j < t; j++) { + out_coin_data.emplace_back(); + out_coin_data.back().address = address; + out_coin_data.back().v = fdp.ConsumeIntegral(); + out_coin_data.back().memo = memo; + + f -= out_coin_data.back().v; + } + + uint64_t fee_test = f; + for (size_t j = 0; j < t; j++) { + fee_test += out_coin_data[j].v; + } + + for (size_t j = 0; j < t; j++) { + fee_test -= spend_coin_data[j].v; + } + assert(fee_test == 0); + + spark::SpendTransaction transaction(params, full_view_key, spend_key, spend_coin_data, cover_set_data, f, 0, out_coin_data); + + transaction.setCoverSets(cover_set_data); + std::unordered_map> cover_sets; + for (const auto set_data: cover_set_data) { + cover_sets[set_data.first] = set_data.second.cover_set; + } + assert(spark::SpendTransaction::verify(transaction, cover_sets)); + + + return 0; + +} \ No newline at end of file From e5e15f76cd4bce666a591bc1786e0faf5bc530b5 Mon Sep 17 00:00:00 2001 From: levonpetrosyan93 Date: Thu, 16 Nov 2023 18:00:54 +0400 Subject: [PATCH 04/13] Fix getsparkanonymityset --- src/spark/state.cpp | 2 +- src/spark/state.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spark/state.cpp b/src/spark/state.cpp index af2d95cf3b..2376f3058d 100644 --- a/src/spark/state.cpp +++ b/src/spark/state.cpp @@ -1247,7 +1247,7 @@ void CSparkState::GetCoinsForRecovery( int 
coinGroupID, std::string start_block_hash, uint256& blockHash_out, - std::vector> coins, + std::vector>& coins, std::vector& setHash_out) { coins.clear(); if (coinGroups.count(coinGroupID) == 0) { diff --git a/src/spark/state.h b/src/spark/state.h index 24262cb4fc..f626ecd283 100644 --- a/src/spark/state.h +++ b/src/spark/state.h @@ -208,7 +208,7 @@ class CSparkState { int coinGroupID, std::string start_block_hash, uint256& blockHash_out, - std::vector> coins, + std::vector>& coins, std::vector& setHash_out); std::unordered_map const & GetMints() const; From 5da3235a256ddbd69b5dcf48ef1b906810711552 Mon Sep 17 00:00:00 2001 From: levonpetrosyan93 Date: Fri, 17 Nov 2023 03:52:55 +0400 Subject: [PATCH 05/13] Fix second anonymity set bug --- src/spark/state.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spark/state.cpp b/src/spark/state.cpp index 2376f3058d..3a301804bc 100644 --- a/src/spark/state.cpp +++ b/src/spark/state.cpp @@ -609,10 +609,10 @@ bool CheckSparkSpendTransaction( id = idAndHash.first - 1; } if (id) { - if (index->sparkMintedCoins.count(idAndHash.first) > 0) { + if (index->sparkMintedCoins.count(id) > 0) { BOOST_FOREACH( const auto& coin, - index->sparkMintedCoins[idAndHash.first]) { + index->sparkMintedCoins[id]) { cover_set.push_back(coin); } } From d2d27ebaec3a084a21577f61d16ad8d4b0c9d407 Mon Sep 17 00:00:00 2001 From: levonpetrosyan93 Date: Fri, 17 Nov 2023 05:18:54 +0400 Subject: [PATCH 06/13] Detect zero challenges in schnorr --- src/libspark/schnorr.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/libspark/schnorr.cpp b/src/libspark/schnorr.cpp index 353bfbc88c..4657fece77 100644 --- a/src/libspark/schnorr.cpp +++ b/src/libspark/schnorr.cpp @@ -47,6 +47,10 @@ void Schnorr::prove(const std::vector& y, const std::vector& Y, const SchnorrProof& pro const Scalar c = challenge(Y, proof.A); Scalar c_power(c); for (std::size_t i = 0; i < n; i++) { + if (c_power.isZero()) { + throw std::invalid_argument("Unexpected challenge!"); + } + points.emplace_back(Y[i]); scalars.emplace_back(c_power); c_power *= c; From f6f6c0f77f4600dfccd6cdf048a8fa46f1b78094 Mon Sep 17 00:00:00 2001 From: levonpetrosyan93 Date: Fri, 17 Nov 2023 05:32:22 +0400 Subject: [PATCH 07/13] Verion bump --- configure.ac | 2 +- src/clientversion.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.ac b/configure.ac index 67e868bcc4..c9642cf988 100644 --- a/configure.ac +++ b/configure.ac @@ -3,7 +3,7 @@ AC_PREREQ([2.60]) define(_CLIENT_VERSION_MAJOR, 0) define(_CLIENT_VERSION_MINOR, 14) define(_CLIENT_VERSION_REVISION, 12) -define(_CLIENT_VERSION_BUILD, 5) +define(_CLIENT_VERSION_BUILD, 6) define(_CLIENT_VERSION_IS_RELEASE, true) define(_COPYRIGHT_YEAR, 2023) define(_COPYRIGHT_HOLDERS,[The %s developers]) diff --git a/src/clientversion.h b/src/clientversion.h index 7b7cab614c..a2d9aa8cbb 100644 --- a/src/clientversion.h +++ b/src/clientversion.h @@ -17,7 +17,7 @@ #define CLIENT_VERSION_MAJOR 0 #define CLIENT_VERSION_MINOR 14 #define CLIENT_VERSION_REVISION 12 -#define CLIENT_VERSION_BUILD 5 +#define CLIENT_VERSION_BUILD 6 //! 
Set to true for release, false for prerelease or test build #define CLIENT_VERSION_IS_RELEASE true From a6e073ba49ecc99bd21953bfdbc5e23f9cf97835 Mon Sep 17 00:00:00 2001 From: levonpetrosyan93 Date: Fri, 17 Nov 2023 07:07:07 +0400 Subject: [PATCH 08/13] Fix rpc help strings --- src/rpc/client.cpp | 1 + src/rpc/misc.cpp | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index d627dc9f97..6ae630cad0 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -193,6 +193,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "getmintmetadata", 0 }, { "getusedcoinserials", 0 }, { "getlatestcoinids", 0 }, + { "getsparkmintmetadata", 0 }, //Lelantus { "mintspark", 0 }, diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index 9c52e50a05..823ad7fdda 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -1169,8 +1169,8 @@ UniValue getsparkanonymityset(const JSONRPCRequest& request) " \"setHash\" (string) Anonymity set hash\n" " \"mints\" (Pair) Serialized Spark coin paired with txhash\n" "}\n" - + HelpExampleCli("getsparkanonymityset", "\"1\"" "{\"ca511f07489e35c9bc60ca62c82de225ba7aae7811ce4c090f95aa976639dc4e\"}") - + HelpExampleRpc("getsparkanonymityset", "\"1\"" "{\"ca511f07489e35c9bc60ca62c82de225ba7aae7811ce4c090f95aa976639dc4e\"}") + + HelpExampleCli("getsparkanonymityset", "\"1\" " "{\"ca511f07489e35c9bc60ca62c82de225ba7aae7811ce4c090f95aa976639dc4e\"}") + + HelpExampleRpc("getsparkanonymityset", "\"1\" " "{\"ca511f07489e35c9bc60ca62c82de225ba7aae7811ce4c090f95aa976639dc4e\"}") ); @@ -1246,6 +1246,9 @@ UniValue getsparkmintmetadata(const JSONRPCRequest& request) "{\n" " \"metadata\" (Pair) nHeight and id for each coin\n" "}\n" + + HelpExampleCli("getsparkmintmetadata", "'{\"coinHashes\": [\"b476ed2b374bb081ea51d111f68f0136252521214e213d119b8dc67b92f5a390\",\"b476ed2b374bb081ea51d111f68f0136252521214e213d119b8dc67b92f5a390\"]}'") + + HelpExampleRpc("getsparkmintmetadata", "{\"coinHashes\": [\"b476ed2b374bb081ea51d111f68f0136252521214e213d119b8dc67b92f5a390\",\"b476ed2b374bb081ea51d111f68f0136252521214e213d119b8dc67b92f5a390\"]}") + ); UniValue coinHashes = find_value(request.params[0].get_obj(), "coinHashes"); From 54fb00f2e0875049c7f7b4c31ea22b3704a24927 Mon Sep 17 00:00:00 2001 From: levonpetrosyan93 Date: Mon, 20 Nov 2023 12:31:20 +0400 Subject: [PATCH 09/13] Spark mainnet HF blocks set --- src/firo_params.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/firo_params.h b/src/firo_params.h index 37e0179c5c..e6be7fd923 100644 --- a/src/firo_params.h +++ b/src/firo_params.h @@ -180,9 +180,9 @@ static const int64_t DUST_HARD_LIMIT = 1000; // 0.00001 FIRO mininput #define DANDELION_FLUFF 10 // Spark -#define SPARK_START_BLOCK 900000 +#define SPARK_START_BLOCK 819300 // Approx Jan 18 2024 8:00 AM UTC #define SPARK_TESTNET_START_BLOCK 107000 -#define LELANTUS_GRACEFUL_PERIOD 950000 +#define LELANTUS_GRACEFUL_PERIOD 1119200 // Approx June 30 2025 #define LELANTUS_TESTNET_GRACEFUL_PERIOD 140000 // Versions of zerocoin mint/spend transactions From cd3781a3f70e575c12b3747f76a59dd5f83cdc14 Mon Sep 17 00:00:00 2001 From: levonpetrosyan93 Date: Mon, 20 Nov 2023 22:39:46 +0400 Subject: [PATCH 10/13] Extended spork support for 1 more year --- src/chainparams.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/chainparams.cpp b/src/chainparams.cpp index f8ef8588a5..348e077684 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -443,7 +443,7 @@ class CMainParams : public CChainParams 
{ consensus.evoSporkKeyID = "a78fERshquPsTv2TuKMSsxTeKom56uBwLP"; consensus.nEvoSporkStartBlock = ZC_LELANTUS_STARTING_BLOCK; - consensus.nEvoSporkStopBlock = AdjustEndingBlockNumberAfterSubsidyHalving(ZC_LELANTUS_STARTING_BLOCK, 3*24*12*365, 486221); // =818275, three years after lelantus + consensus.nEvoSporkStopBlock = AdjustEndingBlockNumberAfterSubsidyHalving(ZC_LELANTUS_STARTING_BLOCK, 4*24*12*365, 486221); // =1028515, four years after lelantus, one year after spark consensus.nEvoSporkStopBlockExtensionVersion = 140903; consensus.nEvoSporkStopBlockPrevious = ZC_LELANTUS_STARTING_BLOCK + 1*24*12*365; // one year after lelantus consensus.nEvoSporkStopBlockExtensionGracefulPeriod = 24*12*14; // two weeks From 1a2d024e1a4502f185e2a21861bd1528ab65ff8c Mon Sep 17 00:00:00 2001 From: levonpetrosyan93 Date: Mon, 20 Nov 2023 22:42:25 +0400 Subject: [PATCH 11/13] Set lelantus graceful period 2 years --- src/firo_params.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/firo_params.h b/src/firo_params.h index e6be7fd923..e86189b2ee 100644 --- a/src/firo_params.h +++ b/src/firo_params.h @@ -182,7 +182,7 @@ static const int64_t DUST_HARD_LIMIT = 1000; // 0.00001 FIRO mininput // Spark #define SPARK_START_BLOCK 819300 // Approx Jan 18 2024 8:00 AM UTC #define SPARK_TESTNET_START_BLOCK 107000 -#define LELANTUS_GRACEFUL_PERIOD 1119200 // Approx June 30 2025 +#define LELANTUS_GRACEFUL_PERIOD 1223500 // Approx Jan 30 2026 #define LELANTUS_TESTNET_GRACEFUL_PERIOD 140000 // Versions of zerocoin mint/spend transactions From 62fc6511a0ca3e727ce26d67004df06c7678c7ea Mon Sep 17 00:00:00 2001 From: psolstice Date: Tue, 21 Nov 2023 09:48:50 +0100 Subject: [PATCH 12/13] Spark runaway exceptions (#1344) * Avoid catch(...) when possible * Add wrappers around major sigma/lelantus/spark calls to catch exceptions * Catch exception thrown from CheckLelantusJMintTransaction * Bug fix * Fixed tests * Disable failing elysium test --- qa/pull-tester/rpc-tests.py | 2 +- src/batchproof_container.cpp | 6 ++-- src/bip47/account.cpp | 2 +- src/bip47/bip47utils.cpp | 2 +- src/chainparams.cpp | 6 ++-- src/elysium/elysium.cpp | 2 +- src/elysium/rpctx.cpp | 2 +- src/elysium/wallet.cpp | 2 +- src/hdmint/tracker.cpp | 4 +-- src/hdmint/wallet.cpp | 2 +- src/lelantus.cpp | 51 ++++++++++++++++++----------- src/liblelantus/lelantus_prover.cpp | 2 +- src/libspark/coin.cpp | 4 +-- src/libspark/hash.cpp | 4 +-- src/libspark/transcript.cpp | 2 +- src/libspark/util.cpp | 2 +- src/qt/transactiondesc.cpp | 2 +- src/qt/transactionrecord.cpp | 2 +- src/rpc/rawtransaction.cpp | 4 +-- src/sigma.cpp | 24 ++++++++++---- src/spark/sparkwallet.cpp | 10 +++--- src/spark/state.cpp | 44 +++++++++++++++---------- src/test/lelantus_state_tests.cpp | 3 -- src/validation.cpp | 31 +++++++++++------- src/wallet/rpcwallet.cpp | 16 ++++----- src/wallet/wallet.cpp | 26 +++++++-------- 26 files changed, 147 insertions(+), 110 deletions(-) diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py index 3b9174e6a7..dd5a541074 100755 --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -110,7 +110,7 @@ 'lelantus_spend_gettransaction.py', 'elysium_create_denomination.py', 'elysium_property_creation_fee.py', - 'elysium_sendmint.py', +# 'elysium_sendmint.py', 'elysium_sendmint_wallet_encryption.py', 'elysium_sendspend.py', 'elysium_sendspend_wallet_encryption.py', diff --git a/src/batchproof_container.cpp b/src/batchproof_container.cpp index 9b89c69b25..d82e4638ea 100644 --- a/src/batchproof_container.cpp +++ 
b/src/batchproof_container.cpp @@ -213,7 +213,7 @@ void BatchProofContainer::batch_sigma() { try { if (!sigmaVerifier.batch_verify(anonymity_set, serials, fPadding, setSizes, proofs)) return false; - } catch (...) { + } catch (const std::exception &) { return false; } return true; @@ -316,7 +316,7 @@ void BatchProofContainer::batch_lelantus() { try { if (!sigmaVerifier.batchverify(anonymity_set, challenges, serials, setSizes, proofs)) return false; - } catch (...) { + } catch (const std::exception &) { return false; } return true; @@ -431,7 +431,7 @@ void BatchProofContainer::batch_spark() { bool passed; try { passed = spark::SpendTransaction::verify(params, sparkTransactions, cover_sets); - } catch (...) { + } catch (const std::exception &) { passed = false; } diff --git a/src/bip47/account.cpp b/src/bip47/account.cpp index 09ef4ce7b2..adb0c7fac6 100644 --- a/src/bip47/account.cpp +++ b/src/bip47/account.cpp @@ -254,7 +254,7 @@ bool CAccountReceiver::acceptMaskedPayload(std::vector const & ma std::unique_ptr jsplit; try { jsplit = lelantus::ParseLelantusJoinSplit(tx); - }catch (...) { + }catch (const std::exception &) { return false; } if (!jsplit) diff --git a/src/bip47/bip47utils.cpp b/src/bip47/bip47utils.cpp index 6b2e482e0d..8dacc6359b 100644 --- a/src/bip47/bip47utils.cpp +++ b/src/bip47/bip47utils.cpp @@ -170,7 +170,7 @@ GroupElement GeFromPubkey(CPubKey const & pubKey) serializedGe.push_back(0x0); try { result.deserialize(&serializedGe[0]); - } catch (...) { + } catch (const std::exception &) { result = GroupElement(); } return result; diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 348e077684..d14b4a9d3e 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -425,7 +425,7 @@ class CMainParams : public CChainParams { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (...) { + } catch (const std::exception &) { continue; } consensus.lelantusBlacklist.insert(coin); @@ -435,7 +435,7 @@ class CMainParams : public CChainParams { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (...) { + } catch (const std::exception &) { continue; } consensus.sigmaBlacklist.insert(coin); @@ -728,7 +728,7 @@ class CTestNetParams : public CChainParams { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (...) { + } catch (const std::exception &) { continue; } consensus.lelantusBlacklist.insert(coin); diff --git a/src/elysium/elysium.cpp b/src/elysium/elysium.cpp index 0f30f1c643..4d8091f491 100644 --- a/src/elysium/elysium.cpp +++ b/src/elysium/elysium.cpp @@ -2329,7 +2329,7 @@ int elysium::WalletTxBuilder( case InputMode::SIGMA: try { if (!pwalletMain->CommitSigmaTransaction(wtxNew, sigmaSelected, sigmaChanges)) return MP_ERR_COMMIT_TX; - } catch (...) { + } catch (const std::exception &) { return MP_ERR_COMMIT_TX; } break; diff --git a/src/elysium/rpctx.cpp b/src/elysium/rpctx.cpp index 723bdb3661..eb30d4a504 100644 --- a/src/elysium/rpctx.cpp +++ b/src/elysium/rpctx.cpp @@ -1689,7 +1689,7 @@ UniValue elysium_sendmint(const JSONRPCRequest& request) if (result != 0) { throw JSONRPCError(result, error_str(result)); } - } catch (...) 
{ + } catch (const std::exception &) { for (auto& id : ids) { wallet->DeleteUnconfirmedSigmaMint(id); } diff --git a/src/elysium/wallet.cpp b/src/elysium/wallet.cpp index f1771ea0fe..f8686dfcbe 100644 --- a/src/elysium/wallet.cpp +++ b/src/elysium/wallet.cpp @@ -197,7 +197,7 @@ SigmaPrivateKey Wallet::GetKey(const SigmaMint &mint) // Try all mint wallets try { return mintWalletV1.GeneratePrivateKey(mint.seedId); - } catch (...) { + } catch (const std::exception &) { return mintWalletV0.GeneratePrivateKey(mint.seedId); } } diff --git a/src/hdmint/tracker.cpp b/src/hdmint/tracker.cpp index c394e721d5..677e88c817 100644 --- a/src/hdmint/tracker.cpp +++ b/src/hdmint/tracker.cpp @@ -546,7 +546,7 @@ bool CHDMintTracker::IsMempoolSpendOurs(const std::set& setMempool, con uint32_t pubcoinId; try { std::tie(spend, pubcoinId) = sigma::ParseSigmaSpend(txin); - } catch (...) { + } catch (const std::exception &) { return false; } @@ -560,7 +560,7 @@ bool CHDMintTracker::IsMempoolSpendOurs(const std::set& setMempool, con std::unique_ptr joinsplit; try { joinsplit = lelantus::ParseLelantusJoinSplit(tx); - } catch (...) { + } catch (const std::exception &) { return false; } diff --git a/src/hdmint/wallet.cpp b/src/hdmint/wallet.cpp index 235995427c..98444fd720 100644 --- a/src/hdmint/wallet.cpp +++ b/src/hdmint/wallet.cpp @@ -1183,7 +1183,7 @@ bool CHDMintWallet::TxOutToPublicCoin(const CTxOut& txout, sigma::PublicCoin& pu secp_primitives::GroupElement publicSigma; try { publicSigma.deserialize(&coin_serialised[0]); - } catch (...) { + } catch (const std::exception &) { return state.DoS(100, error("TxOutToPublicCoin : deserialize failed")); } diff --git a/src/lelantus.cpp b/src/lelantus.cpp index 57a0f29687..7b6df0a744 100644 --- a/src/lelantus.cpp +++ b/src/lelantus.cpp @@ -402,7 +402,7 @@ bool CheckLelantusJoinSplitTransaction( REJECT_MALFORMED, "CheckLelantusJoinSplitTransaction: invalid joinsplit transaction"); } - catch (...) 
{ + catch (const std::exception &) { return state.DoS(100, false, REJECT_MALFORMED, @@ -444,8 +444,13 @@ bool CheckLelantusJoinSplitTransaction( for (const CTxOut &txout : tx.vout) { if (!txout.scriptPubKey.empty() && txout.scriptPubKey.IsLelantusJMint()) { - if (!CheckLelantusJMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, Cout, lelantusTxInfo)) - return false; + try { + if (!CheckLelantusJMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, Cout, lelantusTxInfo)) + return false; + } + catch (const std::exception &x) { + return state.Error(x.what()); + } } else if(txout.scriptPubKey.IsLelantusMint()) { return false; //putting regular mints at JoinSplit transactions is not allowed } else { @@ -767,8 +772,13 @@ bool CheckLelantusTransaction( if (allowLelantus && !isVerifyDB) { for (const CTxOut &txout : tx.vout) { if (!txout.scriptPubKey.empty() && txout.scriptPubKey.IsLelantusMint()) { - if (!CheckLelantusMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, lelantusTxInfo)) - return false; + try { + if (!CheckLelantusMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, lelantusTxInfo)) + return false; + } + catch (const std::exception &x) { + return state.Error(x.what()); + } } } } @@ -789,10 +799,15 @@ bool CheckLelantusTransaction( } if (!isVerifyDB) { - if (!CheckLelantusJoinSplitTransaction( - tx, state, hashTx, isVerifyDB, nHeight, realHeight, - isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo, lelantusTxInfo)) { - return false; + try { + if (!CheckLelantusJoinSplitTransaction( + tx, state, hashTx, isVerifyDB, nHeight, realHeight, + isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo, lelantusTxInfo)) { + return false; + } + } + catch (const std::exception &x) { + return state.Error(x.what()); } } } @@ -815,7 +830,7 @@ void RemoveLelantusJoinSplitReferencingBlock(CTxMemPool& pool, CBlockIndex* bloc try { joinsplit = ParseLelantusJoinSplit(tx); } - catch (...) { + catch (const std::exception &) { txn_to_remove.push_back(tx); break; } @@ -854,7 +869,7 @@ std::vector GetLelantusJoinSplitSerialNumbers(const CTransaction &tx, co try { return ParseLelantusJoinSplit(tx)->getCoinSerialNumbers(); } - catch (...) { + catch (const std::exception &) { return std::vector(); } } @@ -866,7 +881,7 @@ std::vector GetLelantusJoinSplitIds(const CTransaction &tx, const CTxI try { return ParseLelantusJoinSplit(tx)->getCoinGroupIds(); } - catch (...) { + catch (const std::exception &) { return std::vector(); } } @@ -1006,7 +1021,7 @@ bool GetOutPointFromBlock(COutPoint& outPoint, const GroupElement &pubCoinValue, try { ParseLelantusMintScript(txout.scriptPubKey, txPubCoinValue); } - catch (...) 
{ + catch (const std::exception &) { continue; } if(pubCoinValue==txPubCoinValue){ @@ -1131,13 +1146,11 @@ void CLelantusState::Containers::RemoveMint(lelantus::PublicCoin const & pubCoin } void CLelantusState::Containers::AddSpend(Scalar const & serial, int coinGroupId) { - if (!mintMetaInfo.count(coinGroupId)) { - throw std::invalid_argument("group id doesn't exist"); + if (mintMetaInfo.count(coinGroupId) > 0) { + usedCoinSerials[serial] = coinGroupId; + spendMetaInfo[coinGroupId] += 1; + CheckSurgeCondition(); } - - usedCoinSerials[serial] = coinGroupId; - spendMetaInfo[coinGroupId] += 1; - CheckSurgeCondition(); } void CLelantusState::Containers::RemoveSpend(Scalar const & serial) { diff --git a/src/liblelantus/lelantus_prover.cpp b/src/liblelantus/lelantus_prover.cpp index 96d908b676..3154b51ea6 100644 --- a/src/liblelantus/lelantus_prover.cpp +++ b/src/liblelantus/lelantus_prover.cpp @@ -155,7 +155,7 @@ void LelantusProver::generate_sigma_proofs( parallelTasks.emplace_back(threadPool.PostTask([&]() { try { prover.sigma_commit(commits, index, rA_i, rB_i, rC_i, rD_i, a_i, Tk_i, Pk_i, Yk_i, sigma_i, proof); - } catch (...) { + } catch (const std::exception &) { return false; } return true; diff --git a/src/libspark/coin.cpp b/src/libspark/coin.cpp index 785cc1de90..27ca2f56e0 100644 --- a/src/libspark/coin.cpp +++ b/src/libspark/coin.cpp @@ -127,7 +127,7 @@ IdentifiedCoinData Coin::identify(const IncomingViewKey& incoming_view_key) { // Decrypt recipient data CDataStream stream = AEAD::decrypt_and_verify(this->K*incoming_view_key.get_s1(), "Mint coin data", this->r_); stream >> r; - } catch (...) { + } catch (const std::exception &) { throw std::runtime_error("Unable to identify coin"); } @@ -142,7 +142,7 @@ IdentifiedCoinData Coin::identify(const IncomingViewKey& incoming_view_key) { // Decrypt recipient data CDataStream stream = AEAD::decrypt_and_verify(this->K*incoming_view_key.get_s1(), "Spend coin data", this->r_); stream >> r; - } catch (...) { + } catch (const std::exception &) { throw std::runtime_error("Unable to identify coin"); } diff --git a/src/libspark/hash.cpp b/src/libspark/hash.cpp index c37d29a1ea..2c6d71317d 100644 --- a/src/libspark/hash.cpp +++ b/src/libspark/hash.cpp @@ -83,7 +83,7 @@ Scalar Hash::finalize_scalar() { EVP_MD_CTX_free(state_finalize); return candidate; - } catch (...) { + } catch (const std::exception &) { counter++; } } @@ -144,7 +144,7 @@ GroupElement Hash::finalize_group() { EVP_MD_CTX_free(state_finalize); return candidate; - } catch (...) { + } catch (const std::exception &) { counter++; } } diff --git a/src/libspark/transcript.cpp b/src/libspark/transcript.cpp index 8cada15b2e..5cd67c63c0 100644 --- a/src/libspark/transcript.cpp +++ b/src/libspark/transcript.cpp @@ -139,7 +139,7 @@ Scalar Transcript::challenge(const std::string label) { EVP_MD_CTX_free(state_finalize); return candidate; - } catch (...) { + } catch (const std::exception &) { counter++; } } diff --git a/src/libspark/util.cpp b/src/libspark/util.cpp index cb3bed31fe..4547251320 100644 --- a/src/libspark/util.cpp +++ b/src/libspark/util.cpp @@ -125,7 +125,7 @@ GroupElement SparkUtils::hash_generator(const std::string label) { EVP_MD_CTX_free(state_finalize); return candidate; - } catch (...) 
{ + } catch (const std::exception &) { counter++; } } diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp index c1bd864d17..379d54dca6 100644 --- a/src/qt/transactiondesc.cpp +++ b/src/qt/transactiondesc.cpp @@ -267,7 +267,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco try { nTxFee = lelantus::ParseLelantusJoinSplit(*wtx.tx)->getFee(); } - catch (...) { + catch (const std::exception &) { //do nothing } } diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index ff6bb99e27..5360253cdc 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -65,7 +65,7 @@ QList TransactionRecord::decomposeTransaction(const CWallet * if (isAllJoinSplitFromMe && wtx.tx->vin.size() > 0) { try { nTxFee = lelantus::ParseLelantusJoinSplit(*wtx.tx)->getFee(); - } catch (...) { + } catch (const std::exception &) { // do nothing } } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 38606f55ed..c2fe430098 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -127,7 +127,7 @@ void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry) try { jsplit = lelantus::ParseLelantusJoinSplit(tx); } - catch (...) { + catch (const std::exception &) { continue; } in.push_back(Pair("nFees", ValueFromAmount(jsplit->getFee()))); @@ -143,7 +143,7 @@ void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry) try { sparkSpend = std::make_unique(spark::ParseSparkSpend(tx)); } - catch (...) { + catch (const std::exception &) { continue; } in.push_back(Pair("nFees", ValueFromAmount(sparkSpend->getFee()))); diff --git a/src/sigma.cpp b/src/sigma.cpp index ae32d83b3a..23236241e6 100644 --- a/src/sigma.cpp +++ b/src/sigma.cpp @@ -457,8 +457,13 @@ bool CheckSigmaTransaction( if (allowSigma) { for (const CTxOut &txout : tx.vout) { if (!txout.scriptPubKey.empty() && txout.scriptPubKey.IsSigmaMint()) { - if (!CheckSigmaMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, sigmaTxInfo)) - return false; + try { + if (!CheckSigmaMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, sigmaTxInfo)) + return false; + } + catch (const std::exception &x) { + return state.Error(x.what()); + } } } } @@ -508,10 +513,15 @@ bool CheckSigmaTransaction( // Check vOut // Only one loop, we checked on the format before entering this case if (!isVerifyDB) { - if (!CheckSigmaSpendTransaction( - tx, denominations, state, hashTx, isVerifyDB, nHeight, realHeight, - isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo)) { - return false; + try { + if (!CheckSigmaSpendTransaction( + tx, denominations, state, hashTx, isVerifyDB, nHeight, realHeight, + isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo)) { + return false; + } + } + catch (const std::exception &x) { + return state.Error(x.what()); } } } @@ -665,7 +675,7 @@ bool GetOutPointFromBlock(COutPoint& outPoint, const GroupElement &pubCoinValue, txout.scriptPubKey.end()); try { txPubCoinValue.deserialize(&coin_serialised[0]); - } catch (...) { + } catch (const std::exception &) { return false; } if(pubCoinValue==txPubCoinValue){ diff --git a/src/spark/sparkwallet.cpp b/src/spark/sparkwallet.cpp index 01c632c813..36c1c7cdbe 100644 --- a/src/spark/sparkwallet.cpp +++ b/src/spark/sparkwallet.cpp @@ -260,7 +260,7 @@ bool CSparkWallet::isAddressMine(const std::string& encodedAddr) { spark::Address address(params); try { address.decode(encodedAddr); - } catch (...) 
{ + } catch (const std::exception &) { return false; } @@ -273,7 +273,7 @@ bool CSparkWallet::isAddressMine(const std::string& encodedAddr) { try { d = viewKey.get_diversifier(address.get_d()); - } catch (...) { + } catch (const std::exception &) { return false; } @@ -437,7 +437,7 @@ bool CSparkWallet::getMintAmount(spark::Coin coin, CAmount& amount) { spark::IdentifiedCoinData identifiedCoinData; try { identifiedCoinData = coin.identify(this->viewKey); - } catch (...) { + } catch (const std::exception &) { return false; } amount = identifiedCoinData.v; @@ -501,7 +501,7 @@ void CSparkWallet::UpdateSpendStateFromBlock(const CBlock& block) { uint256 lTagHash = primitives::GetLTagHash(txLTag); UpdateSpendState(txLTag, lTagHash, txHash); } - } catch (...) { + } catch (const std::exception &) { } } } @@ -511,7 +511,7 @@ void CSparkWallet::UpdateSpendStateFromBlock(const CBlock& block) { bool CSparkWallet::isMine(spark::Coin coin) const { try { spark::IdentifiedCoinData identifiedCoinData = coin.identify(this->viewKey); - } catch (...) { + } catch (const std::exception &) { return false; } diff --git a/src/spark/state.cpp b/src/spark/state.cpp index 3a301804bc..c5e65cb7a9 100644 --- a/src/spark/state.cpp +++ b/src/spark/state.cpp @@ -129,7 +129,7 @@ void ParseSparkMintTransaction(const std::vector& scripts, MintTransact } try { mintTransaction.setMintTransaction(serializedCoins); - } catch (...) { + } catch (const std::exception &) { throw std::invalid_argument("Unable to deserialize Spark mint transaction"); } } @@ -152,7 +152,7 @@ void ParseSparkMintCoin(const CScript& script, spark::Coin& txCoin) try { stream >> txCoin; - } catch (...) { + } catch (const std::exception &) { throw std::invalid_argument("Unable to deserialize Spark mint"); } } @@ -184,7 +184,7 @@ std::vector GetSparkUsedTags(const CTransaction &tx) spark::SpendTransaction spendTransaction(params); try { spendTransaction = ParseSparkSpend(tx); - } catch (...) { + } catch (const std::exception &) { return std::vector(); } @@ -205,7 +205,7 @@ std::vector GetSparkMintCoins(const CTransaction &tx) ParseSparkMintCoin(script, coin); coin.setSerialContext(serial_context); result.push_back(coin); - } catch (...) { + } catch (const std::exception &) { //Continue } } @@ -332,7 +332,7 @@ void RemoveSpendReferencingBlock(CTxMemPool& pool, CBlockIndex* blockIndex) { try { sparkSpend = std::make_unique(ParseSparkSpend(tx)); } - catch (...) { + catch (const std::exception &) { txn_to_remove.push_back(tx); break; } @@ -470,7 +470,7 @@ bool CheckSparkSMintTransaction( spark::Coin coin(Params::get_default()); ParseSparkMintCoin(script, coin); out_coins.push_back(coin); - } catch (...) { + } catch (const std::exception &) { return state.DoS(100, false, REJECT_INVALID, @@ -529,7 +529,7 @@ bool CheckSparkSpendTransaction( REJECT_MALFORMED, "CheckSparkSpendTransaction: invalid spend transaction"); } - catch (...) { + catch (const std::exception &) { return state.DoS(100, false, REJECT_MALFORMED, @@ -653,7 +653,7 @@ bool CheckSparkSpendTransaction( } else { try { passVerify = spark::SpendTransaction::verify(*spend, cover_sets); - } catch (...) 
{ + } catch (const std::exception &) { passVerify = false; } } @@ -730,9 +730,14 @@ bool CheckSparkTransaction( } } if (!txOuts.empty()) { - if (!CheckSparkMintTransaction(txOuts, state, hashTx, fStatefulSigmaCheck, sparkTxInfo)) { - LogPrintf("CheckSparkTransaction::Mint verification failed.\n"); - return false; + try { + if (!CheckSparkMintTransaction(txOuts, state, hashTx, fStatefulSigmaCheck, sparkTxInfo)) { + LogPrintf("CheckSparkTransaction::Mint verification failed.\n"); + return false; + } + } + catch (const std::exception &x) { + return state.Error(x.what()); } } else { return state.DoS(100, false, @@ -750,10 +755,15 @@ bool CheckSparkTransaction( } if (!isVerifyDB) { - if (!CheckSparkSpendTransaction( - tx, state, hashTx, isVerifyDB, nHeight, - isCheckWallet, fStatefulSigmaCheck, sparkTxInfo)) { - return false; + try { + if (!CheckSparkSpendTransaction( + tx, state, hashTx, isVerifyDB, nHeight, + isCheckWallet, fStatefulSigmaCheck, sparkTxInfo)) { + return false; + } + } + catch (const std::exception &x) { + return state.Error(x.what()); } } } @@ -810,7 +820,7 @@ bool GetOutPointFromBlock(COutPoint& outPoint, const spark::Coin& coin, const CB try { ParseSparkMintCoin(txout.scriptPubKey, txCoin); } - catch (...) { + catch (const std::exception &) { continue; } if (coin == txCoin) { @@ -830,7 +840,7 @@ std::vector getSerialContext(const CTransaction &tx) { try { spark::SpendTransaction spend = ParseSparkSpend(tx); serialContextStream << spend.getUsedLTags(); - } catch (...) { + } catch (const std::exception &) { return std::vector(); } } else { diff --git a/src/test/lelantus_state_tests.cpp b/src/test/lelantus_state_tests.cpp index a493271952..a48dc54c0e 100644 --- a/src/test/lelantus_state_tests.cpp +++ b/src/test/lelantus_state_tests.cpp @@ -187,9 +187,6 @@ BOOST_AUTO_TEST_CASE(serial_adding) BOOST_CHECK(!lelantusState->IsUsedCoinSerial(serial2)); BOOST_CHECK(!lelantusState->IsUsedCoinSerialHash(receivedSerial, serialHash2)); - - // add serials to group that doesn't exist, should fail - BOOST_CHECK_THROW(lelantusState->AddSpend(Scalar(1), 100), std::invalid_argument); } BOOST_AUTO_TEST_CASE(mempool) diff --git a/src/validation.cpp b/src/validation.cpp index aad1ea69f1..7e9e3901c3 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -862,7 +862,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C catch (CBadTxIn&) { return state.Invalid(false, REJECT_CONFLICT, "txn-invalid-lelantus-joinsplit"); } - catch (...) { + catch (const std::exception &) { return state.Invalid(false, REJECT_CONFLICT, "failed to deserialize joinsplit"); } @@ -905,7 +905,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C try { sparkUsedLTags = spark::GetSparkUsedTags(tx); } - catch (...) { + catch (const std::exception &) { return state.Invalid(false, REJECT_CONFLICT, "failed to deserialize spark spend"); } @@ -953,7 +953,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C try { sparkMintCoins = spark::GetSparkMintCoins(tx); } - catch (...) { + catch (const std::exception &) { return state.Invalid(false, REJECT_CONFLICT, "failed to deserialize spark mint"); } @@ -1149,7 +1149,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse joinsplit"); } - catch (...) 
{ + catch (const std::exception &) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize joinsplit"); } } else { @@ -1159,7 +1159,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse joinsplit"); } - catch (...) { + catch (const std::exception &) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize joinsplit"); } } @@ -1593,7 +1593,14 @@ bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const { LogPrintf("AcceptToMemoryPool(), transaction: %s\n", tx->GetHash().ToString()); std::vector coins_to_uncache; - bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache, isCheckWalletTransaction, markFiroSpendTransactionSerial); + bool res = false; + try { + res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache, isCheckWalletTransaction, markFiroSpendTransactionSerial); + } + catch (const std::exception &x) { + state.Error(x.what()); + res = false; + } if (!res) { BOOST_FOREACH(const COutPoint& hashTx, coins_to_uncache) pcoinsTip->Uncache(hashTx); @@ -2085,7 +2092,7 @@ bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoins catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse joinsplit"); } - catch (...) { + catch (const std::exception &) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize joinsplit"); } } @@ -2522,7 +2529,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, CValidationState& s try { nFees += lelantus::ParseLelantusJoinSplit(tx)->getFee(); } - catch (...) { + catch (const std::exception &) { // do nothing } } @@ -2530,7 +2537,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, CValidationState& s try { nFees = spark::ParseSparkSpend(tx).getFee(); } - catch (...) { + catch (const std::exception &) { // do nothing } } @@ -2920,7 +2927,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse joinsplit"); } - catch (...) { + catch (const std::exception &) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize joinsplit"); } } @@ -2932,7 +2939,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse spark spend"); } - catch (...) { + catch (const std::exception &) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize spark spend"); } } @@ -3488,7 +3495,7 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara try { joinsplit = lelantus::ParseLelantusJoinSplit(*tx); } - catch (...) { + catch (const std::exception &) { continue; } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 97947e3b7f..d2c74a34ae 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -2082,14 +2082,14 @@ UniValue gettransaction(const JSONRPCRequest& request) try { nFee = (0 - lelantus::ParseLelantusJoinSplit(*wtx.tx)->getFee()); } - catch (...) { + catch (const std::exception &) { // do nothing } } else if (wtx.tx->IsSparkSpend()) { try { nFee = (0 - spark::ParseSparkSpend(*wtx.tx).getFee()); } - catch (...) 
{ + catch (const std::exception &) { // do nothing } } @@ -3440,7 +3440,7 @@ UniValue getsparkaddressbalance(const JSONRPCRequest& request) { unsigned char coinNetwork; try { coinNetwork = address.decode(strAddress); - } catch (...) { + } catch (const std::exception &) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, std::string("Invalid Spark address: ")+strAddress); } @@ -3562,7 +3562,7 @@ UniValue mintspark(const JSONRPCRequest& request) unsigned char coinNetwork; try { coinNetwork = address.decode(name_); - } catch (...) { + } catch (const std::exception &) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, std::string("Invalid Spark address: ")+name_); } @@ -3697,7 +3697,7 @@ UniValue spendspark(const JSONRPCRequest& request) isSparkAddress = true; if (coinNetwork != network) throw JSONRPCError(RPC_INVALID_PARAMETER, std::string("Invalid address, wrong network type: ")+name_); - } catch (...) { + } catch (const std::exception &) { isSparkAddress = false; } @@ -3771,7 +3771,7 @@ UniValue spendspark(const JSONRPCRequest& request) CWalletTx wtx; try { wtx = pwallet->SpendAndStoreSpark(recipients, privateRecipients, fee); - } catch (...) { + } catch (const std::exception &) { throw JSONRPCError(RPC_WALLET_ERROR, "Spark spend creation failed."); } @@ -3803,7 +3803,7 @@ UniValue lelantustospark(const JSONRPCRequest& request) { bool passed = false; try { passed = pwallet->LelantusToSpark(strFailReason); - } catch (...) { + } catch (const std::exception &) { throw JSONRPCError(RPC_WALLET_ERROR, "Lelantus to Spark failed!"); } if (!passed || strFailReason != "") @@ -4754,7 +4754,7 @@ UniValue listlelantusjoinsplits(const JSONRPCRequest& request) { std::unique_ptr joinsplit; try { joinsplit = lelantus::ParseLelantusJoinSplit(*pwtx->tx); - } catch (...) { + } catch (const std::exception &) { continue; } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index a86adce8bb..d21d140ee5 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1491,7 +1491,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx) try { joinsplit = lelantus::ParseLelantusJoinSplit(*wtx.tx); } - catch (...) { + catch (const std::exception &) { continue; } @@ -1517,7 +1517,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx) spark::SpendTransaction spend = spark::ParseSparkSpend(*wtx.tx); lTags = spend.getUsedLTags(); } - catch (...) { + catch (const std::exception &) { continue; } @@ -1672,7 +1672,7 @@ isminetype CWallet::IsMine(const CTxIn &txin, const CTransaction& tx) const try { joinsplit = lelantus::ParseLelantusJoinSplit(tx); } - catch (...) { + catch (const std::exception &) { return ISMINE_NO; } @@ -1687,7 +1687,7 @@ isminetype CWallet::IsMine(const CTxIn &txin, const CTransaction& tx) const spark::SpendTransaction spend = spark::ParseSparkSpend(tx); lTags = spend.getUsedLTags(); } - catch (...) { + catch (const std::exception &) { return ISMINE_NO; } if (!sparkWallet) @@ -1745,7 +1745,7 @@ CAmount CWallet::GetDebit(const CTxIn &txin, const CTransaction& tx, const ismin try { joinsplit = lelantus::ParseLelantusJoinSplit(tx); } - catch (...) { + catch (const std::exception &) { goto end; } @@ -1767,7 +1767,7 @@ CAmount CWallet::GetDebit(const CTxIn &txin, const CTransaction& tx, const ismin spark::SpendTransaction spend = spark::ParseSparkSpend(tx); lTags = spend.getUsedLTags(); } - catch (...) 
{ + catch (const std::exception &) { goto end; } if (!sparkWallet) @@ -1929,7 +1929,7 @@ CAmount CWallet::GetChange(const uint256& tx, const CTxOut &txout) const try { spark::ParseSparkMintCoin(txout.scriptPubKey, coin); coin.setSerialContext(serial_context); - } catch (...) { + } catch (const std::exception &) { return 0; } return sparkWallet->getMyCoinV(coin); @@ -2289,14 +2289,14 @@ void CWalletTx::GetAmounts(std::list& listReceived, try { nFee = lelantus::ParseLelantusJoinSplit(*tx)->getFee(); } - catch (...) { + catch (const std::exception &) { // do nothing } } else if (tx->IsSparkSpend()) { try { nFee = spark::ParseSparkSpend(*tx).getFee(); } - catch (...) { + catch (const std::exception &) { // do nothing } } else { @@ -2745,7 +2745,7 @@ bool CWalletTx::IsChange(uint32_t out) const { try { spark::ParseSparkMintCoin(tx->vout[out].scriptPubKey, coin); coin.setSerialContext(serial_context); - } catch (...) { + } catch (const std::exception &) { return false; } return pwallet->sparkWallet->getMyCoinIsChange(coin); @@ -5627,7 +5627,7 @@ bool CWallet::CommitSigmaTransaction(CWalletTx& wtxNew, std::vector CValidationState state; CReserveKey reserveKey(this); CommitTransaction(wtxNew, reserveKey, g_connman.get(), state); - } catch (...) { + } catch (const std::exception &) { auto error = _( "Error: The transaction was rejected! This might happen if some of " "the coins in your wallet were already spent, such as if you used " @@ -5776,7 +5776,7 @@ CWalletTx CWallet::SpendAndStoreSpark( CValidationState state; CReserveKey reserveKey(this); CommitTransaction(result, reserveKey, g_connman.get(), state); - } catch (...) { + } catch (const std::exception &) { auto error = _( "Error: The transaction was rejected! This might happen if some of " "the coins in your wallet were already spent, such as if you used " @@ -5937,7 +5937,7 @@ bool CWallet::CommitLelantusTransaction(CWalletTx& wtxNew, std::vector Date: Tue, 21 Nov 2023 17:03:21 +0800 Subject: [PATCH 13/13] Revert "Spark runaway exceptions (#1344)" (#1358) This reverts commit 62fc6511a0ca3e727ce26d67004df06c7678c7ea. 
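The diff that follows reverts the pattern introduced in #1344: consensus-level checks such as CheckSparkMintTransaction and CheckSparkSpendTransaction had been wrapped in try/catch blocks that turn a thrown std::exception into a CValidationState error, and bare catch (...) handlers had been narrowed to catch (const std::exception &). As a minimal, self-contained sketch (not Firo code), the snippet below contrasts the two styles; ValidationState, CheckMintTransaction and the surrounding scaffolding are simplified stand-ins for CValidationState and the real Spark check functions, introduced here only for illustration.

#include <iostream>
#include <stdexcept>
#include <string>

// Simplified stand-in for CValidationState: records a rejection reason.
struct ValidationState {
    std::string reason;
    bool Error(const std::string& r) { reason = r; return false; }
};

// Simplified stand-in for a consensus check that throws on malformed input.
bool CheckMintTransaction(bool malformed) {
    if (malformed)
        throw std::invalid_argument("Unable to deserialize Spark mint transaction");
    return true;
}

// Style introduced by #1344 (and reverted below): convert any std::exception
// thrown by the check into a validation error instead of letting it escape.
bool CheckWithWrapper(ValidationState& state, bool malformed) {
    try {
        return CheckMintTransaction(malformed);
    } catch (const std::exception& x) {
        return state.Error(x.what());
    }
}

// Style restored by this revert: call the check directly; any exception
// propagates to the caller's existing catch site.
bool CheckWithoutWrapper(ValidationState& state, bool malformed) {
    (void)state; // unused in this style
    return CheckMintTransaction(malformed);
}

int main() {
    ValidationState state;
    std::cout << std::boolalpha
              << "wrapped: " << CheckWithWrapper(state, true)
              << " (" << state.reason << ")\n";
    try {
        CheckWithoutWrapper(state, true);
    } catch (const std::exception& x) {
        std::cout << "unwrapped, propagated: " << x.what() << "\n";
    }
    return 0;
}

With the revert applied, unexpected exceptions are again handled at the callers' existing catch sites (for example the broad catch (...) handlers restored in AcceptToMemoryPoolWorker below) rather than being converted to state errors at each check.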
--- qa/pull-tester/rpc-tests.py | 2 +- src/batchproof_container.cpp | 6 ++-- src/bip47/account.cpp | 2 +- src/bip47/bip47utils.cpp | 2 +- src/chainparams.cpp | 6 ++-- src/elysium/elysium.cpp | 2 +- src/elysium/rpctx.cpp | 2 +- src/elysium/wallet.cpp | 2 +- src/hdmint/tracker.cpp | 4 +-- src/hdmint/wallet.cpp | 2 +- src/lelantus.cpp | 51 +++++++++++------------------ src/liblelantus/lelantus_prover.cpp | 2 +- src/libspark/coin.cpp | 4 +-- src/libspark/hash.cpp | 4 +-- src/libspark/transcript.cpp | 2 +- src/libspark/util.cpp | 2 +- src/qt/transactiondesc.cpp | 2 +- src/qt/transactionrecord.cpp | 2 +- src/rpc/rawtransaction.cpp | 4 +-- src/sigma.cpp | 24 ++++---------- src/spark/sparkwallet.cpp | 10 +++--- src/spark/state.cpp | 44 ++++++++++--------------- src/test/lelantus_state_tests.cpp | 3 ++ src/validation.cpp | 31 +++++++----------- src/wallet/rpcwallet.cpp | 16 ++++----- src/wallet/wallet.cpp | 26 +++++++-------- 26 files changed, 110 insertions(+), 147 deletions(-) diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py index dd5a541074..3b9174e6a7 100755 --- a/qa/pull-tester/rpc-tests.py +++ b/qa/pull-tester/rpc-tests.py @@ -110,7 +110,7 @@ 'lelantus_spend_gettransaction.py', 'elysium_create_denomination.py', 'elysium_property_creation_fee.py', -# 'elysium_sendmint.py', + 'elysium_sendmint.py', 'elysium_sendmint_wallet_encryption.py', 'elysium_sendspend.py', 'elysium_sendspend_wallet_encryption.py', diff --git a/src/batchproof_container.cpp b/src/batchproof_container.cpp index d82e4638ea..9b89c69b25 100644 --- a/src/batchproof_container.cpp +++ b/src/batchproof_container.cpp @@ -213,7 +213,7 @@ void BatchProofContainer::batch_sigma() { try { if (!sigmaVerifier.batch_verify(anonymity_set, serials, fPadding, setSizes, proofs)) return false; - } catch (const std::exception &) { + } catch (...) { return false; } return true; @@ -316,7 +316,7 @@ void BatchProofContainer::batch_lelantus() { try { if (!sigmaVerifier.batchverify(anonymity_set, challenges, serials, setSizes, proofs)) return false; - } catch (const std::exception &) { + } catch (...) { return false; } return true; @@ -431,7 +431,7 @@ void BatchProofContainer::batch_spark() { bool passed; try { passed = spark::SpendTransaction::verify(params, sparkTransactions, cover_sets); - } catch (const std::exception &) { + } catch (...) { passed = false; } diff --git a/src/bip47/account.cpp b/src/bip47/account.cpp index adb0c7fac6..09ef4ce7b2 100644 --- a/src/bip47/account.cpp +++ b/src/bip47/account.cpp @@ -254,7 +254,7 @@ bool CAccountReceiver::acceptMaskedPayload(std::vector const & ma std::unique_ptr jsplit; try { jsplit = lelantus::ParseLelantusJoinSplit(tx); - }catch (const std::exception &) { + }catch (...) { return false; } if (!jsplit) diff --git a/src/bip47/bip47utils.cpp b/src/bip47/bip47utils.cpp index 8dacc6359b..6b2e482e0d 100644 --- a/src/bip47/bip47utils.cpp +++ b/src/bip47/bip47utils.cpp @@ -170,7 +170,7 @@ GroupElement GeFromPubkey(CPubKey const & pubKey) serializedGe.push_back(0x0); try { result.deserialize(&serializedGe[0]); - } catch (const std::exception &) { + } catch (...) { result = GroupElement(); } return result; diff --git a/src/chainparams.cpp b/src/chainparams.cpp index d14b4a9d3e..348e077684 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -425,7 +425,7 @@ class CMainParams : public CChainParams { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (const std::exception &) { + } catch (...) 
{ continue; } consensus.lelantusBlacklist.insert(coin); @@ -435,7 +435,7 @@ class CMainParams : public CChainParams { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (const std::exception &) { + } catch (...) { continue; } consensus.sigmaBlacklist.insert(coin); @@ -728,7 +728,7 @@ class CTestNetParams : public CChainParams { GroupElement coin; try { coin.deserialize(ParseHex(str).data()); - } catch (const std::exception &) { + } catch (...) { continue; } consensus.lelantusBlacklist.insert(coin); diff --git a/src/elysium/elysium.cpp b/src/elysium/elysium.cpp index 4d8091f491..0f30f1c643 100644 --- a/src/elysium/elysium.cpp +++ b/src/elysium/elysium.cpp @@ -2329,7 +2329,7 @@ int elysium::WalletTxBuilder( case InputMode::SIGMA: try { if (!pwalletMain->CommitSigmaTransaction(wtxNew, sigmaSelected, sigmaChanges)) return MP_ERR_COMMIT_TX; - } catch (const std::exception &) { + } catch (...) { return MP_ERR_COMMIT_TX; } break; diff --git a/src/elysium/rpctx.cpp b/src/elysium/rpctx.cpp index eb30d4a504..723bdb3661 100644 --- a/src/elysium/rpctx.cpp +++ b/src/elysium/rpctx.cpp @@ -1689,7 +1689,7 @@ UniValue elysium_sendmint(const JSONRPCRequest& request) if (result != 0) { throw JSONRPCError(result, error_str(result)); } - } catch (const std::exception &) { + } catch (...) { for (auto& id : ids) { wallet->DeleteUnconfirmedSigmaMint(id); } diff --git a/src/elysium/wallet.cpp b/src/elysium/wallet.cpp index f8686dfcbe..f1771ea0fe 100644 --- a/src/elysium/wallet.cpp +++ b/src/elysium/wallet.cpp @@ -197,7 +197,7 @@ SigmaPrivateKey Wallet::GetKey(const SigmaMint &mint) // Try all mint wallets try { return mintWalletV1.GeneratePrivateKey(mint.seedId); - } catch (const std::exception &) { + } catch (...) { return mintWalletV0.GeneratePrivateKey(mint.seedId); } } diff --git a/src/hdmint/tracker.cpp b/src/hdmint/tracker.cpp index 677e88c817..c394e721d5 100644 --- a/src/hdmint/tracker.cpp +++ b/src/hdmint/tracker.cpp @@ -546,7 +546,7 @@ bool CHDMintTracker::IsMempoolSpendOurs(const std::set& setMempool, con uint32_t pubcoinId; try { std::tie(spend, pubcoinId) = sigma::ParseSigmaSpend(txin); - } catch (const std::exception &) { + } catch (...) { return false; } @@ -560,7 +560,7 @@ bool CHDMintTracker::IsMempoolSpendOurs(const std::set& setMempool, con std::unique_ptr joinsplit; try { joinsplit = lelantus::ParseLelantusJoinSplit(tx); - } catch (const std::exception &) { + } catch (...) { return false; } diff --git a/src/hdmint/wallet.cpp b/src/hdmint/wallet.cpp index 98444fd720..235995427c 100644 --- a/src/hdmint/wallet.cpp +++ b/src/hdmint/wallet.cpp @@ -1183,7 +1183,7 @@ bool CHDMintWallet::TxOutToPublicCoin(const CTxOut& txout, sigma::PublicCoin& pu secp_primitives::GroupElement publicSigma; try { publicSigma.deserialize(&coin_serialised[0]); - } catch (const std::exception &) { + } catch (...) { return state.DoS(100, error("TxOutToPublicCoin : deserialize failed")); } diff --git a/src/lelantus.cpp b/src/lelantus.cpp index 7b6df0a744..57a0f29687 100644 --- a/src/lelantus.cpp +++ b/src/lelantus.cpp @@ -402,7 +402,7 @@ bool CheckLelantusJoinSplitTransaction( REJECT_MALFORMED, "CheckLelantusJoinSplitTransaction: invalid joinsplit transaction"); } - catch (const std::exception &) { + catch (...) 
{ return state.DoS(100, false, REJECT_MALFORMED, @@ -444,13 +444,8 @@ bool CheckLelantusJoinSplitTransaction( for (const CTxOut &txout : tx.vout) { if (!txout.scriptPubKey.empty() && txout.scriptPubKey.IsLelantusJMint()) { - try { - if (!CheckLelantusJMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, Cout, lelantusTxInfo)) - return false; - } - catch (const std::exception &x) { - return state.Error(x.what()); - } + if (!CheckLelantusJMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, Cout, lelantusTxInfo)) + return false; } else if(txout.scriptPubKey.IsLelantusMint()) { return false; //putting regular mints at JoinSplit transactions is not allowed } else { @@ -772,13 +767,8 @@ bool CheckLelantusTransaction( if (allowLelantus && !isVerifyDB) { for (const CTxOut &txout : tx.vout) { if (!txout.scriptPubKey.empty() && txout.scriptPubKey.IsLelantusMint()) { - try { - if (!CheckLelantusMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, lelantusTxInfo)) - return false; - } - catch (const std::exception &x) { - return state.Error(x.what()); - } + if (!CheckLelantusMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, lelantusTxInfo)) + return false; } } } @@ -799,15 +789,10 @@ bool CheckLelantusTransaction( } if (!isVerifyDB) { - try { - if (!CheckLelantusJoinSplitTransaction( - tx, state, hashTx, isVerifyDB, nHeight, realHeight, - isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo, lelantusTxInfo)) { - return false; - } - } - catch (const std::exception &x) { - return state.Error(x.what()); + if (!CheckLelantusJoinSplitTransaction( + tx, state, hashTx, isVerifyDB, nHeight, realHeight, + isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo, lelantusTxInfo)) { + return false; } } } @@ -830,7 +815,7 @@ void RemoveLelantusJoinSplitReferencingBlock(CTxMemPool& pool, CBlockIndex* bloc try { joinsplit = ParseLelantusJoinSplit(tx); } - catch (const std::exception &) { + catch (...) { txn_to_remove.push_back(tx); break; } @@ -869,7 +854,7 @@ std::vector GetLelantusJoinSplitSerialNumbers(const CTransaction &tx, co try { return ParseLelantusJoinSplit(tx)->getCoinSerialNumbers(); } - catch (const std::exception &) { + catch (...) { return std::vector(); } } @@ -881,7 +866,7 @@ std::vector GetLelantusJoinSplitIds(const CTransaction &tx, const CTxI try { return ParseLelantusJoinSplit(tx)->getCoinGroupIds(); } - catch (const std::exception &) { + catch (...) { return std::vector(); } } @@ -1021,7 +1006,7 @@ bool GetOutPointFromBlock(COutPoint& outPoint, const GroupElement &pubCoinValue, try { ParseLelantusMintScript(txout.scriptPubKey, txPubCoinValue); } - catch (const std::exception &) { + catch (...) 
{ continue; } if(pubCoinValue==txPubCoinValue){ @@ -1146,11 +1131,13 @@ void CLelantusState::Containers::RemoveMint(lelantus::PublicCoin const & pubCoin } void CLelantusState::Containers::AddSpend(Scalar const & serial, int coinGroupId) { - if (mintMetaInfo.count(coinGroupId) > 0) { - usedCoinSerials[serial] = coinGroupId; - spendMetaInfo[coinGroupId] += 1; - CheckSurgeCondition(); + if (!mintMetaInfo.count(coinGroupId)) { + throw std::invalid_argument("group id doesn't exist"); } + + usedCoinSerials[serial] = coinGroupId; + spendMetaInfo[coinGroupId] += 1; + CheckSurgeCondition(); } void CLelantusState::Containers::RemoveSpend(Scalar const & serial) { diff --git a/src/liblelantus/lelantus_prover.cpp b/src/liblelantus/lelantus_prover.cpp index 3154b51ea6..96d908b676 100644 --- a/src/liblelantus/lelantus_prover.cpp +++ b/src/liblelantus/lelantus_prover.cpp @@ -155,7 +155,7 @@ void LelantusProver::generate_sigma_proofs( parallelTasks.emplace_back(threadPool.PostTask([&]() { try { prover.sigma_commit(commits, index, rA_i, rB_i, rC_i, rD_i, a_i, Tk_i, Pk_i, Yk_i, sigma_i, proof); - } catch (const std::exception &) { + } catch (...) { return false; } return true; diff --git a/src/libspark/coin.cpp b/src/libspark/coin.cpp index 27ca2f56e0..785cc1de90 100644 --- a/src/libspark/coin.cpp +++ b/src/libspark/coin.cpp @@ -127,7 +127,7 @@ IdentifiedCoinData Coin::identify(const IncomingViewKey& incoming_view_key) { // Decrypt recipient data CDataStream stream = AEAD::decrypt_and_verify(this->K*incoming_view_key.get_s1(), "Mint coin data", this->r_); stream >> r; - } catch (const std::exception &) { + } catch (...) { throw std::runtime_error("Unable to identify coin"); } @@ -142,7 +142,7 @@ IdentifiedCoinData Coin::identify(const IncomingViewKey& incoming_view_key) { // Decrypt recipient data CDataStream stream = AEAD::decrypt_and_verify(this->K*incoming_view_key.get_s1(), "Spend coin data", this->r_); stream >> r; - } catch (const std::exception &) { + } catch (...) { throw std::runtime_error("Unable to identify coin"); } diff --git a/src/libspark/hash.cpp b/src/libspark/hash.cpp index 2c6d71317d..c37d29a1ea 100644 --- a/src/libspark/hash.cpp +++ b/src/libspark/hash.cpp @@ -83,7 +83,7 @@ Scalar Hash::finalize_scalar() { EVP_MD_CTX_free(state_finalize); return candidate; - } catch (const std::exception &) { + } catch (...) { counter++; } } @@ -144,7 +144,7 @@ GroupElement Hash::finalize_group() { EVP_MD_CTX_free(state_finalize); return candidate; - } catch (const std::exception &) { + } catch (...) { counter++; } } diff --git a/src/libspark/transcript.cpp b/src/libspark/transcript.cpp index 5cd67c63c0..8cada15b2e 100644 --- a/src/libspark/transcript.cpp +++ b/src/libspark/transcript.cpp @@ -139,7 +139,7 @@ Scalar Transcript::challenge(const std::string label) { EVP_MD_CTX_free(state_finalize); return candidate; - } catch (const std::exception &) { + } catch (...) { counter++; } } diff --git a/src/libspark/util.cpp b/src/libspark/util.cpp index 4547251320..cb3bed31fe 100644 --- a/src/libspark/util.cpp +++ b/src/libspark/util.cpp @@ -125,7 +125,7 @@ GroupElement SparkUtils::hash_generator(const std::string label) { EVP_MD_CTX_free(state_finalize); return candidate; - } catch (const std::exception &) { + } catch (...) 
{ counter++; } } diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp index 379d54dca6..c1bd864d17 100644 --- a/src/qt/transactiondesc.cpp +++ b/src/qt/transactiondesc.cpp @@ -267,7 +267,7 @@ QString TransactionDesc::toHTML(CWallet *wallet, CWalletTx &wtx, TransactionReco try { nTxFee = lelantus::ParseLelantusJoinSplit(*wtx.tx)->getFee(); } - catch (const std::exception &) { + catch (...) { //do nothing } } diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index 5360253cdc..ff6bb99e27 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -65,7 +65,7 @@ QList TransactionRecord::decomposeTransaction(const CWallet * if (isAllJoinSplitFromMe && wtx.tx->vin.size() > 0) { try { nTxFee = lelantus::ParseLelantusJoinSplit(*wtx.tx)->getFee(); - } catch (const std::exception &) { + } catch (...) { // do nothing } } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index c2fe430098..38606f55ed 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -127,7 +127,7 @@ void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry) try { jsplit = lelantus::ParseLelantusJoinSplit(tx); } - catch (const std::exception &) { + catch (...) { continue; } in.push_back(Pair("nFees", ValueFromAmount(jsplit->getFee()))); @@ -143,7 +143,7 @@ void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& entry) try { sparkSpend = std::make_unique(spark::ParseSparkSpend(tx)); } - catch (const std::exception &) { + catch (...) { continue; } in.push_back(Pair("nFees", ValueFromAmount(sparkSpend->getFee()))); diff --git a/src/sigma.cpp b/src/sigma.cpp index 23236241e6..ae32d83b3a 100644 --- a/src/sigma.cpp +++ b/src/sigma.cpp @@ -457,13 +457,8 @@ bool CheckSigmaTransaction( if (allowSigma) { for (const CTxOut &txout : tx.vout) { if (!txout.scriptPubKey.empty() && txout.scriptPubKey.IsSigmaMint()) { - try { - if (!CheckSigmaMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, sigmaTxInfo)) - return false; - } - catch (const std::exception &x) { - return state.Error(x.what()); - } + if (!CheckSigmaMintTransaction(txout, state, hashTx, fStatefulSigmaCheck, sigmaTxInfo)) + return false; } } } @@ -513,15 +508,10 @@ bool CheckSigmaTransaction( // Check vOut // Only one loop, we checked on the format before entering this case if (!isVerifyDB) { - try { - if (!CheckSigmaSpendTransaction( - tx, denominations, state, hashTx, isVerifyDB, nHeight, realHeight, - isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo)) { - return false; - } - } - catch (const std::exception &x) { - return state.Error(x.what()); + if (!CheckSigmaSpendTransaction( + tx, denominations, state, hashTx, isVerifyDB, nHeight, realHeight, + isCheckWallet, fStatefulSigmaCheck, sigmaTxInfo)) { + return false; } } } @@ -675,7 +665,7 @@ bool GetOutPointFromBlock(COutPoint& outPoint, const GroupElement &pubCoinValue, txout.scriptPubKey.end()); try { txPubCoinValue.deserialize(&coin_serialised[0]); - } catch (const std::exception &) { + } catch (...) { return false; } if(pubCoinValue==txPubCoinValue){ diff --git a/src/spark/sparkwallet.cpp b/src/spark/sparkwallet.cpp index 36c1c7cdbe..01c632c813 100644 --- a/src/spark/sparkwallet.cpp +++ b/src/spark/sparkwallet.cpp @@ -260,7 +260,7 @@ bool CSparkWallet::isAddressMine(const std::string& encodedAddr) { spark::Address address(params); try { address.decode(encodedAddr); - } catch (const std::exception &) { + } catch (...) 
{ return false; } @@ -273,7 +273,7 @@ bool CSparkWallet::isAddressMine(const std::string& encodedAddr) { try { d = viewKey.get_diversifier(address.get_d()); - } catch (const std::exception &) { + } catch (...) { return false; } @@ -437,7 +437,7 @@ bool CSparkWallet::getMintAmount(spark::Coin coin, CAmount& amount) { spark::IdentifiedCoinData identifiedCoinData; try { identifiedCoinData = coin.identify(this->viewKey); - } catch (const std::exception &) { + } catch (...) { return false; } amount = identifiedCoinData.v; @@ -501,7 +501,7 @@ void CSparkWallet::UpdateSpendStateFromBlock(const CBlock& block) { uint256 lTagHash = primitives::GetLTagHash(txLTag); UpdateSpendState(txLTag, lTagHash, txHash); } - } catch (const std::exception &) { + } catch (...) { } } } @@ -511,7 +511,7 @@ void CSparkWallet::UpdateSpendStateFromBlock(const CBlock& block) { bool CSparkWallet::isMine(spark::Coin coin) const { try { spark::IdentifiedCoinData identifiedCoinData = coin.identify(this->viewKey); - } catch (const std::exception &) { + } catch (...) { return false; } diff --git a/src/spark/state.cpp b/src/spark/state.cpp index c5e65cb7a9..3a301804bc 100644 --- a/src/spark/state.cpp +++ b/src/spark/state.cpp @@ -129,7 +129,7 @@ void ParseSparkMintTransaction(const std::vector& scripts, MintTransact } try { mintTransaction.setMintTransaction(serializedCoins); - } catch (const std::exception &) { + } catch (...) { throw std::invalid_argument("Unable to deserialize Spark mint transaction"); } } @@ -152,7 +152,7 @@ void ParseSparkMintCoin(const CScript& script, spark::Coin& txCoin) try { stream >> txCoin; - } catch (const std::exception &) { + } catch (...) { throw std::invalid_argument("Unable to deserialize Spark mint"); } } @@ -184,7 +184,7 @@ std::vector GetSparkUsedTags(const CTransaction &tx) spark::SpendTransaction spendTransaction(params); try { spendTransaction = ParseSparkSpend(tx); - } catch (const std::exception &) { + } catch (...) { return std::vector(); } @@ -205,7 +205,7 @@ std::vector GetSparkMintCoins(const CTransaction &tx) ParseSparkMintCoin(script, coin); coin.setSerialContext(serial_context); result.push_back(coin); - } catch (const std::exception &) { + } catch (...) { //Continue } } @@ -332,7 +332,7 @@ void RemoveSpendReferencingBlock(CTxMemPool& pool, CBlockIndex* blockIndex) { try { sparkSpend = std::make_unique(ParseSparkSpend(tx)); } - catch (const std::exception &) { + catch (...) { txn_to_remove.push_back(tx); break; } @@ -470,7 +470,7 @@ bool CheckSparkSMintTransaction( spark::Coin coin(Params::get_default()); ParseSparkMintCoin(script, coin); out_coins.push_back(coin); - } catch (const std::exception &) { + } catch (...) { return state.DoS(100, false, REJECT_INVALID, @@ -529,7 +529,7 @@ bool CheckSparkSpendTransaction( REJECT_MALFORMED, "CheckSparkSpendTransaction: invalid spend transaction"); } - catch (const std::exception &) { + catch (...) { return state.DoS(100, false, REJECT_MALFORMED, @@ -653,7 +653,7 @@ bool CheckSparkSpendTransaction( } else { try { passVerify = spark::SpendTransaction::verify(*spend, cover_sets); - } catch (const std::exception &) { + } catch (...) 
{ passVerify = false; } } @@ -730,14 +730,9 @@ bool CheckSparkTransaction( } } if (!txOuts.empty()) { - try { - if (!CheckSparkMintTransaction(txOuts, state, hashTx, fStatefulSigmaCheck, sparkTxInfo)) { - LogPrintf("CheckSparkTransaction::Mint verification failed.\n"); - return false; - } - } - catch (const std::exception &x) { - return state.Error(x.what()); + if (!CheckSparkMintTransaction(txOuts, state, hashTx, fStatefulSigmaCheck, sparkTxInfo)) { + LogPrintf("CheckSparkTransaction::Mint verification failed.\n"); + return false; } } else { return state.DoS(100, false, @@ -755,15 +750,10 @@ bool CheckSparkTransaction( } if (!isVerifyDB) { - try { - if (!CheckSparkSpendTransaction( - tx, state, hashTx, isVerifyDB, nHeight, - isCheckWallet, fStatefulSigmaCheck, sparkTxInfo)) { - return false; - } - } - catch (const std::exception &x) { - return state.Error(x.what()); + if (!CheckSparkSpendTransaction( + tx, state, hashTx, isVerifyDB, nHeight, + isCheckWallet, fStatefulSigmaCheck, sparkTxInfo)) { + return false; } } } @@ -820,7 +810,7 @@ bool GetOutPointFromBlock(COutPoint& outPoint, const spark::Coin& coin, const CB try { ParseSparkMintCoin(txout.scriptPubKey, txCoin); } - catch (const std::exception &) { + catch (...) { continue; } if (coin == txCoin) { @@ -840,7 +830,7 @@ std::vector getSerialContext(const CTransaction &tx) { try { spark::SpendTransaction spend = ParseSparkSpend(tx); serialContextStream << spend.getUsedLTags(); - } catch (const std::exception &) { + } catch (...) { return std::vector(); } } else { diff --git a/src/test/lelantus_state_tests.cpp b/src/test/lelantus_state_tests.cpp index a48dc54c0e..a493271952 100644 --- a/src/test/lelantus_state_tests.cpp +++ b/src/test/lelantus_state_tests.cpp @@ -187,6 +187,9 @@ BOOST_AUTO_TEST_CASE(serial_adding) BOOST_CHECK(!lelantusState->IsUsedCoinSerial(serial2)); BOOST_CHECK(!lelantusState->IsUsedCoinSerialHash(receivedSerial, serialHash2)); + + // add serials to group that doesn't exist, should fail + BOOST_CHECK_THROW(lelantusState->AddSpend(Scalar(1), 100), std::invalid_argument); } BOOST_AUTO_TEST_CASE(mempool) diff --git a/src/validation.cpp b/src/validation.cpp index 7e9e3901c3..aad1ea69f1 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -862,7 +862,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C catch (CBadTxIn&) { return state.Invalid(false, REJECT_CONFLICT, "txn-invalid-lelantus-joinsplit"); } - catch (const std::exception &) { + catch (...) { return state.Invalid(false, REJECT_CONFLICT, "failed to deserialize joinsplit"); } @@ -905,7 +905,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C try { sparkUsedLTags = spark::GetSparkUsedTags(tx); } - catch (const std::exception &) { + catch (...) { return state.Invalid(false, REJECT_CONFLICT, "failed to deserialize spark spend"); } @@ -953,7 +953,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C try { sparkMintCoins = spark::GetSparkMintCoins(tx); } - catch (const std::exception &) { + catch (...) { return state.Invalid(false, REJECT_CONFLICT, "failed to deserialize spark mint"); } @@ -1149,7 +1149,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse joinsplit"); } - catch (const std::exception &) { + catch (...) 
{ return state.DoS(0, false, REJECT_INVALID, "failed to deserialize joinsplit"); } } else { @@ -1159,7 +1159,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse joinsplit"); } - catch (const std::exception &) { + catch (...) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize joinsplit"); } } @@ -1593,14 +1593,7 @@ bool AcceptToMemoryPoolWithTime(CTxMemPool& pool, CValidationState &state, const { LogPrintf("AcceptToMemoryPool(), transaction: %s\n", tx->GetHash().ToString()); std::vector coins_to_uncache; - bool res = false; - try { - res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache, isCheckWalletTransaction, markFiroSpendTransactionSerial); - } - catch (const std::exception &x) { - state.Error(x.what()); - res = false; - } + bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache, isCheckWalletTransaction, markFiroSpendTransactionSerial); if (!res) { BOOST_FOREACH(const COutPoint& hashTx, coins_to_uncache) pcoinsTip->Uncache(hashTx); @@ -2092,7 +2085,7 @@ bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoins catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse joinsplit"); } - catch (const std::exception &) { + catch (...) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize joinsplit"); } } @@ -2529,7 +2522,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, CValidationState& s try { nFees += lelantus::ParseLelantusJoinSplit(tx)->getFee(); } - catch (const std::exception &) { + catch (...) { // do nothing } } @@ -2537,7 +2530,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, CValidationState& s try { nFees = spark::ParseSparkSpend(tx).getFee(); } - catch (const std::exception &) { + catch (...) { // do nothing } } @@ -2927,7 +2920,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse joinsplit"); } - catch (const std::exception &) { + catch (...) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize joinsplit"); } } @@ -2939,7 +2932,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin catch (CBadTxIn&) { return state.DoS(0, false, REJECT_INVALID, "unable to parse spark spend"); } - catch (const std::exception &) { + catch (...) { return state.DoS(0, false, REJECT_INVALID, "failed to deserialize spark spend"); } } @@ -3495,7 +3488,7 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara try { joinsplit = lelantus::ParseLelantusJoinSplit(*tx); } - catch (const std::exception &) { + catch (...) { continue; } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index d2c74a34ae..97947e3b7f 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -2082,14 +2082,14 @@ UniValue gettransaction(const JSONRPCRequest& request) try { nFee = (0 - lelantus::ParseLelantusJoinSplit(*wtx.tx)->getFee()); } - catch (const std::exception &) { + catch (...) { // do nothing } } else if (wtx.tx->IsSparkSpend()) { try { nFee = (0 - spark::ParseSparkSpend(*wtx.tx).getFee()); } - catch (const std::exception &) { + catch (...) 
{ // do nothing } } @@ -3440,7 +3440,7 @@ UniValue getsparkaddressbalance(const JSONRPCRequest& request) { unsigned char coinNetwork; try { coinNetwork = address.decode(strAddress); - } catch (const std::exception &) { + } catch (...) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, std::string("Invalid Spark address: ")+strAddress); } @@ -3562,7 +3562,7 @@ UniValue mintspark(const JSONRPCRequest& request) unsigned char coinNetwork; try { coinNetwork = address.decode(name_); - } catch (const std::exception &) { + } catch (...) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, std::string("Invalid Spark address: ")+name_); } @@ -3697,7 +3697,7 @@ UniValue spendspark(const JSONRPCRequest& request) isSparkAddress = true; if (coinNetwork != network) throw JSONRPCError(RPC_INVALID_PARAMETER, std::string("Invalid address, wrong network type: ")+name_); - } catch (const std::exception &) { + } catch (...) { isSparkAddress = false; } @@ -3771,7 +3771,7 @@ UniValue spendspark(const JSONRPCRequest& request) CWalletTx wtx; try { wtx = pwallet->SpendAndStoreSpark(recipients, privateRecipients, fee); - } catch (const std::exception &) { + } catch (...) { throw JSONRPCError(RPC_WALLET_ERROR, "Spark spend creation failed."); } @@ -3803,7 +3803,7 @@ UniValue lelantustospark(const JSONRPCRequest& request) { bool passed = false; try { passed = pwallet->LelantusToSpark(strFailReason); - } catch (const std::exception &) { + } catch (...) { throw JSONRPCError(RPC_WALLET_ERROR, "Lelantus to Spark failed!"); } if (!passed || strFailReason != "") @@ -4754,7 +4754,7 @@ UniValue listlelantusjoinsplits(const JSONRPCRequest& request) { std::unique_ptr joinsplit; try { joinsplit = lelantus::ParseLelantusJoinSplit(*pwtx->tx); - } catch (const std::exception &) { + } catch (...) { continue; } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index d21d140ee5..a86adce8bb 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1491,7 +1491,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx) try { joinsplit = lelantus::ParseLelantusJoinSplit(*wtx.tx); } - catch (const std::exception &) { + catch (...) { continue; } @@ -1517,7 +1517,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx) spark::SpendTransaction spend = spark::ParseSparkSpend(*wtx.tx); lTags = spend.getUsedLTags(); } - catch (const std::exception &) { + catch (...) { continue; } @@ -1672,7 +1672,7 @@ isminetype CWallet::IsMine(const CTxIn &txin, const CTransaction& tx) const try { joinsplit = lelantus::ParseLelantusJoinSplit(tx); } - catch (const std::exception &) { + catch (...) { return ISMINE_NO; } @@ -1687,7 +1687,7 @@ isminetype CWallet::IsMine(const CTxIn &txin, const CTransaction& tx) const spark::SpendTransaction spend = spark::ParseSparkSpend(tx); lTags = spend.getUsedLTags(); } - catch (const std::exception &) { + catch (...) { return ISMINE_NO; } if (!sparkWallet) @@ -1745,7 +1745,7 @@ CAmount CWallet::GetDebit(const CTxIn &txin, const CTransaction& tx, const ismin try { joinsplit = lelantus::ParseLelantusJoinSplit(tx); } - catch (const std::exception &) { + catch (...) { goto end; } @@ -1767,7 +1767,7 @@ CAmount CWallet::GetDebit(const CTxIn &txin, const CTransaction& tx, const ismin spark::SpendTransaction spend = spark::ParseSparkSpend(tx); lTags = spend.getUsedLTags(); } - catch (const std::exception &) { + catch (...) 
{ goto end; } if (!sparkWallet) @@ -1929,7 +1929,7 @@ CAmount CWallet::GetChange(const uint256& tx, const CTxOut &txout) const try { spark::ParseSparkMintCoin(txout.scriptPubKey, coin); coin.setSerialContext(serial_context); - } catch (const std::exception &) { + } catch (...) { return 0; } return sparkWallet->getMyCoinV(coin); @@ -2289,14 +2289,14 @@ void CWalletTx::GetAmounts(std::list& listReceived, try { nFee = lelantus::ParseLelantusJoinSplit(*tx)->getFee(); } - catch (const std::exception &) { + catch (...) { // do nothing } } else if (tx->IsSparkSpend()) { try { nFee = spark::ParseSparkSpend(*tx).getFee(); } - catch (const std::exception &) { + catch (...) { // do nothing } } else { @@ -2745,7 +2745,7 @@ bool CWalletTx::IsChange(uint32_t out) const { try { spark::ParseSparkMintCoin(tx->vout[out].scriptPubKey, coin); coin.setSerialContext(serial_context); - } catch (const std::exception &) { + } catch (...) { return false; } return pwallet->sparkWallet->getMyCoinIsChange(coin); @@ -5627,7 +5627,7 @@ bool CWallet::CommitSigmaTransaction(CWalletTx& wtxNew, std::vector CValidationState state; CReserveKey reserveKey(this); CommitTransaction(wtxNew, reserveKey, g_connman.get(), state); - } catch (const std::exception &) { + } catch (...) { auto error = _( "Error: The transaction was rejected! This might happen if some of " "the coins in your wallet were already spent, such as if you used " @@ -5776,7 +5776,7 @@ CWalletTx CWallet::SpendAndStoreSpark( CValidationState state; CReserveKey reserveKey(this); CommitTransaction(result, reserveKey, g_connman.get(), state); - } catch (const std::exception &) { + } catch (...) { auto error = _( "Error: The transaction was rejected! This might happen if some of " "the coins in your wallet were already spent, such as if you used " @@ -5937,7 +5937,7 @@ bool CWallet::CommitLelantusTransaction(CWalletTx& wtxNew, std::vector