diff --git a/.github/workflows/ci-master.yml b/.github/workflows/ci-master.yml index f0e631ccec..9a69ac704a 100644 --- a/.github/workflows/ci-master.yml +++ b/.github/workflows/ci-master.yml @@ -141,7 +141,7 @@ jobs: working-directory: ${{ env.SOURCE_ARTIFACT }} - name: Build Firo run: | - ./configure --disable-jni --enable-elysium --prefix=$(realpath depends/x86_64-w64-mingw32) + ./configure --without-libs --disable-jni --enable-elysium --prefix=$(realpath depends/x86_64-w64-mingw32) make -j$(nproc) working-directory: ${{ env.SOURCE_ARTIFACT }} - name: Prepare Files for Artifact @@ -173,6 +173,9 @@ jobs: run: sudo xcode-select -s /Applications/Xcode.app/Contents/Developer - name: Install Required Packages run: brew install automake coreutils pkg-config + # Workaround for macOS: https://github.com/actions/runner/issues/2958 + - name: Install setuptools + run: sudo -H pip install setuptools - name: Build Dependencies run: make -C depends -j$(sysctl -n hw.activecpu) working-directory: ${{ env.SOURCE_ARTIFACT }} diff --git a/configure.ac b/configure.ac index e3d761a421..53d504a5da 100644 --- a/configure.ac +++ b/configure.ac @@ -3,7 +3,7 @@ AC_PREREQ([2.60]) define(_CLIENT_VERSION_MAJOR, 0) define(_CLIENT_VERSION_MINOR, 14) define(_CLIENT_VERSION_REVISION, 12) -define(_CLIENT_VERSION_BUILD, 5) +define(_CLIENT_VERSION_BUILD, 6) define(_CLIENT_VERSION_IS_RELEASE, true) define(_COPYRIGHT_YEAR, 2023) define(_COPYRIGHT_HOLDERS,[The %s developers]) diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 6308480537..6cc42204d2 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -443,7 +443,7 @@ class CMainParams : public CChainParams { consensus.evoSporkKeyID = "a78fERshquPsTv2TuKMSsxTeKom56uBwLP"; consensus.nEvoSporkStartBlock = ZC_LELANTUS_STARTING_BLOCK; - consensus.nEvoSporkStopBlock = AdjustEndingBlockNumberAfterSubsidyHalving(ZC_LELANTUS_STARTING_BLOCK, 3*24*12*365, 486221); // =818275, three years after lelantus + consensus.nEvoSporkStopBlock = 
AdjustEndingBlockNumberAfterSubsidyHalving(ZC_LELANTUS_STARTING_BLOCK, 4*24*12*365, 486221); // =1028515, four years after lelantus, one year after spark consensus.nEvoSporkStopBlockExtensionVersion = 140903; consensus.nEvoSporkStopBlockPrevious = ZC_LELANTUS_STARTING_BLOCK + 1*24*12*365; // one year after lelantus consensus.nEvoSporkStopBlockExtensionGracefulPeriod = 24*12*14; // two weeks diff --git a/src/clientversion.h b/src/clientversion.h index 7b7cab614c..a2d9aa8cbb 100644 --- a/src/clientversion.h +++ b/src/clientversion.h @@ -17,7 +17,7 @@ #define CLIENT_VERSION_MAJOR 0 #define CLIENT_VERSION_MINOR 14 #define CLIENT_VERSION_REVISION 12 -#define CLIENT_VERSION_BUILD 5 +#define CLIENT_VERSION_BUILD 6 //! Set to true for release, false for prerelease or test build #define CLIENT_VERSION_IS_RELEASE true diff --git a/src/crypto/progpow.h b/src/crypto/progpow.h index ba484d287d..6b1f7b5e4a 100644 --- a/src/crypto/progpow.h +++ b/src/crypto/progpow.h @@ -45,4 +45,4 @@ uint256 progpow_hash_full(const CProgPowHeader& header, uint256& mix_hash); /* Performs a light progpow hash (DAG loops excluded) provided header has mix_hash */ uint256 progpow_hash_light(const CProgPowHeader& header); -#endif // FIRO_PROGPOW_H +#endif // FIRO_PROGPOW_H \ No newline at end of file diff --git a/src/firo_params.h b/src/firo_params.h index 37e0179c5c..e86189b2ee 100644 --- a/src/firo_params.h +++ b/src/firo_params.h @@ -180,9 +180,9 @@ static const int64_t DUST_HARD_LIMIT = 1000; // 0.00001 FIRO mininput #define DANDELION_FLUFF 10 // Spark -#define SPARK_START_BLOCK 900000 +#define SPARK_START_BLOCK 819300 // Approx Jan 18 2024 8:00 AM UTC #define SPARK_TESTNET_START_BLOCK 107000 -#define LELANTUS_GRACEFUL_PERIOD 950000 +#define LELANTUS_GRACEFUL_PERIOD 1223500 // Approx Jan 30 2026 #define LELANTUS_TESTNET_GRACEFUL_PERIOD 140000 // Versions of zerocoin mint/spend transactions diff --git a/src/fuzz/FuzzedDataProvider.h b/src/fuzz/FuzzedDataProvider.h new file mode 100644 index 
0000000000..9f66afc9e7 --- /dev/null +++ b/src/fuzz/FuzzedDataProvider.h @@ -0,0 +1,398 @@ +//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// A single header library providing an utility class to break up an array of +// bytes. Whenever run on the same input, provides the same output, as long as +// its methods are called in the same order, with the same arguments. +//===----------------------------------------------------------------------===// + +#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ +#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// In addition to the comments below, the API is also briefly documented at +// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider +class FuzzedDataProvider { + public: + // |data| is an array of length |size| that the FuzzedDataProvider wraps to + // provide more granular access. |data| must outlive the FuzzedDataProvider. + FuzzedDataProvider(const uint8_t *data, size_t size) + : data_ptr_(data), remaining_bytes_(size) {} + ~FuzzedDataProvider() = default; + + // See the implementation below (after the class definition) for more verbose + // comments for each of the methods. + + // Methods returning std::vector of bytes. These are the most popular choice + // when splitting fuzzing input into pieces, as every piece is put into a + // separate buffer (i.e. ASan would catch any under-/overflow) and the memory + // will be released automatically. 
+ template std::vector ConsumeBytes(size_t num_bytes); + template + std::vector ConsumeBytesWithTerminator(size_t num_bytes, T terminator = 0); + template std::vector ConsumeRemainingBytes(); + + // Methods returning strings. Use only when you need a std::string or a null + // terminated C-string. Otherwise, prefer the methods returning std::vector. + std::string ConsumeBytesAsString(size_t num_bytes); + std::string ConsumeRandomLengthString(size_t max_length); + std::string ConsumeRandomLengthString(); + std::string ConsumeRemainingBytesAsString(); + + // Methods returning integer values. + template T ConsumeIntegral(); + template T ConsumeIntegralInRange(T min, T max); + + // Methods returning floating point values. + template T ConsumeFloatingPoint(); + template T ConsumeFloatingPointInRange(T min, T max); + + // 0 <= return value <= 1. + template T ConsumeProbability(); + + bool ConsumeBool(); + + // Returns a value chosen from the given enum. + template T ConsumeEnum(); + + // Returns a value from the given array. + template T PickValueInArray(const T (&array)[size]); + template + T PickValueInArray(const std::array &array); + template T PickValueInArray(std::initializer_list list); + + // Writes data to the given destination and returns number of bytes written. + size_t ConsumeData(void *destination, size_t num_bytes); + + // Reports the remaining bytes available for fuzzed input. + size_t remaining_bytes() { return remaining_bytes_; } + + private: + FuzzedDataProvider(const FuzzedDataProvider &) = delete; + FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete; + + void CopyAndAdvance(void *destination, size_t num_bytes); + + void Advance(size_t num_bytes); + + template + std::vector ConsumeBytes(size_t size, size_t num_bytes); + + template TS ConvertUnsignedToSigned(TU value); + + const uint8_t *data_ptr_; + size_t remaining_bytes_; +}; + +// Returns a std::vector containing |num_bytes| of input data. 
If fewer than +// |num_bytes| of data remain, returns a shorter std::vector containing all +// of the data that's left. Can be used with any byte sized type, such as +// char, unsigned char, uint8_t, etc. +template +std::vector FuzzedDataProvider::ConsumeBytes(size_t num_bytes) { + num_bytes = std::min(num_bytes, remaining_bytes_); + return ConsumeBytes(num_bytes, num_bytes); +} + +// Similar to |ConsumeBytes|, but also appends the terminator value at the end +// of the resulting vector. Useful, when a mutable null-terminated C-string is +// needed, for example. But that is a rare case. Better avoid it, if possible, +// and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods. +template +std::vector FuzzedDataProvider::ConsumeBytesWithTerminator(size_t num_bytes, + T terminator) { + num_bytes = std::min(num_bytes, remaining_bytes_); + std::vector result = ConsumeBytes(num_bytes + 1, num_bytes); + result.back() = terminator; + return result; +} + +// Returns a std::vector containing all remaining bytes of the input data. +template +std::vector FuzzedDataProvider::ConsumeRemainingBytes() { + return ConsumeBytes(remaining_bytes_); +} + +// Returns a std::string containing |num_bytes| of input data. Using this and +// |.c_str()| on the resulting string is the best way to get an immutable +// null-terminated C string. If fewer than |num_bytes| of data remain, returns +// a shorter std::string containing all of the data that's left. +inline std::string FuzzedDataProvider::ConsumeBytesAsString(size_t num_bytes) { + static_assert(sizeof(std::string::value_type) == sizeof(uint8_t), + "ConsumeBytesAsString cannot convert the data to a string."); + + num_bytes = std::min(num_bytes, remaining_bytes_); + std::string result( + reinterpret_cast(data_ptr_), num_bytes); + Advance(num_bytes); + return result; +} + +// Returns a std::string of length from 0 to |max_length|. When it runs out of +// input data, returns what remains of the input. 
Designed to be more stable +// with respect to a fuzzer inserting characters than just picking a random +// length and then consuming that many bytes with |ConsumeBytes|. +inline std::string +FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) { + // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\" + // followed by anything else to the end of the string. As a result of this + // logic, a fuzzer can insert characters into the string, and the string + // will be lengthened to include those new characters, resulting in a more + // stable fuzzer than picking the length of a string independently from + // picking its contents. + std::string result; + + // Reserve the anticipated capaticity to prevent several reallocations. + result.reserve(std::min(max_length, remaining_bytes_)); + for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) { + char next = ConvertUnsignedToSigned(data_ptr_[0]); + Advance(1); + if (next == '\\' && remaining_bytes_ != 0) { + next = ConvertUnsignedToSigned(data_ptr_[0]); + Advance(1); + if (next != '\\') + break; + } + result += next; + } + + result.shrink_to_fit(); + return result; +} + +// Returns a std::string of length from 0 to |remaining_bytes_|. +inline std::string FuzzedDataProvider::ConsumeRandomLengthString() { + return ConsumeRandomLengthString(remaining_bytes_); +} + +// Returns a std::string containing all remaining bytes of the input data. +// Prefer using |ConsumeRemainingBytes| unless you actually need a std::string +// object. +inline std::string FuzzedDataProvider::ConsumeRemainingBytesAsString() { + return ConsumeBytesAsString(remaining_bytes_); +} + +// Returns a number in the range [Type's min, Type's max]. The value might +// not be uniformly distributed in the given range. If there's no input data +// left, always returns |min|. 
+template T FuzzedDataProvider::ConsumeIntegral() { + return ConsumeIntegralInRange(std::numeric_limits::min(), + std::numeric_limits::max()); +} + +// Returns a number in the range [min, max] by consuming bytes from the +// input data. The value might not be uniformly distributed in the given +// range. If there's no input data left, always returns |min|. |min| must +// be less than or equal to |max|. +template +T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) { + static_assert(std::is_integral::value, "An integral type is required."); + static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type."); + + if (min > max) + abort(); + + // Use the biggest type possible to hold the range and the result. + uint64_t range = static_cast(max) - min; + uint64_t result = 0; + size_t offset = 0; + + while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 && + remaining_bytes_ != 0) { + // Pull bytes off the end of the seed data. Experimentally, this seems to + // allow the fuzzer to more easily explore the input space. This makes + // sense, since it works by modifying inputs that caused new code to run, + // and this data is often used to encode length of data read by + // |ConsumeBytes|. Separating out read lengths makes it easier modify the + // contents of the data that is actually read. + --remaining_bytes_; + result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_]; + offset += CHAR_BIT; + } + + // Avoid division by 0, in case |range + 1| results in overflow. + if (range != std::numeric_limits::max()) + result = result % (range + 1); + + return static_cast(min + result); +} + +// Returns a floating point value in the range [Type's lowest, Type's max] by +// consuming bytes from the input data. If there's no input data left, always +// returns approximately 0. 
+template T FuzzedDataProvider::ConsumeFloatingPoint() { + return ConsumeFloatingPointInRange(std::numeric_limits::lowest(), + std::numeric_limits::max()); +} + +// Returns a floating point value in the given range by consuming bytes from +// the input data. If there's no input data left, returns |min|. Note that +// |min| must be less than or equal to |max|. +template +T FuzzedDataProvider::ConsumeFloatingPointInRange(T min, T max) { + if (min > max) + abort(); + + T range = .0; + T result = min; + constexpr T zero(.0); + if (max > zero && min < zero && max > min + std::numeric_limits::max()) { + // The diff |max - min| would overflow the given floating point type. Use + // the half of the diff as the range and consume a bool to decide whether + // the result is in the first of the second part of the diff. + range = (max / 2.0) - (min / 2.0); + if (ConsumeBool()) { + result += range; + } + } else { + range = max - min; + } + + return result + range * ConsumeProbability(); +} + +// Returns a floating point number in the range [0.0, 1.0]. If there's no +// input data left, always returns 0. +template T FuzzedDataProvider::ConsumeProbability() { + static_assert(std::is_floating_point::value, + "A floating point type is required."); + + // Use different integral types for different floating point types in order + // to provide better density of the resulting values. + using IntegralType = + typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t, + uint64_t>::type; + + T result = static_cast(ConsumeIntegral()); + result /= static_cast(std::numeric_limits::max()); + return result; +} + +// Reads one byte and returns a bool, or false when no data remains. +inline bool FuzzedDataProvider::ConsumeBool() { + return 1 & ConsumeIntegral(); +} + +// Returns an enum value. The enum must start at 0 and be contiguous. It must +// also contain |kMaxValue| aliased to its largest (inclusive) value. 
Such as: +// enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue }; +template T FuzzedDataProvider::ConsumeEnum() { + static_assert(std::is_enum::value, "|T| must be an enum type."); + return static_cast( + ConsumeIntegralInRange(0, static_cast(T::kMaxValue))); +} + +// Returns a copy of the value selected from the given fixed-size |array|. +template +T FuzzedDataProvider::PickValueInArray(const T (&array)[size]) { + static_assert(size > 0, "The array must be non empty."); + return array[ConsumeIntegralInRange(0, size - 1)]; +} + +template +T FuzzedDataProvider::PickValueInArray(const std::array &array) { + static_assert(size > 0, "The array must be non empty."); + return array[ConsumeIntegralInRange(0, size - 1)]; +} + +template +T FuzzedDataProvider::PickValueInArray(std::initializer_list list) { + // TODO(Dor1s): switch to static_assert once C++14 is allowed. + if (!list.size()) + abort(); + + return *(list.begin() + ConsumeIntegralInRange(0, list.size() - 1)); +} + +// Writes |num_bytes| of input data to the given destination pointer. If there +// is not enough data left, writes all remaining bytes. Return value is the +// number of bytes written. +// In general, it's better to avoid using this function, but it may be useful +// in cases when it's necessary to fill a certain buffer or object with +// fuzzing data. +inline size_t FuzzedDataProvider::ConsumeData(void *destination, + size_t num_bytes) { + num_bytes = std::min(num_bytes, remaining_bytes_); + CopyAndAdvance(destination, num_bytes); + return num_bytes; +} + +// Private methods. 
+inline void FuzzedDataProvider::CopyAndAdvance(void *destination, + size_t num_bytes) { + std::memcpy(destination, data_ptr_, num_bytes); + Advance(num_bytes); +} + +inline void FuzzedDataProvider::Advance(size_t num_bytes) { + if (num_bytes > remaining_bytes_) + abort(); + + data_ptr_ += num_bytes; + remaining_bytes_ -= num_bytes; +} + +template +std::vector FuzzedDataProvider::ConsumeBytes(size_t size, size_t num_bytes) { + static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type."); + + // The point of using the size-based constructor below is to increase the + // odds of having a vector object with capacity being equal to the length. + // That part is always implementation specific, but at least both libc++ and + // libstdc++ allocate the requested number of bytes in that constructor, + // which seems to be a natural choice for other implementations as well. + // To increase the odds even more, we also call |shrink_to_fit| below. + std::vector result(size); + if (size == 0) { + if (num_bytes != 0) + abort(); + return result; + } + + CopyAndAdvance(result.data(), num_bytes); + + // Even though |shrink_to_fit| is also implementation specific, we expect it + // to provide an additional assurance in case vector's constructor allocated + // a buffer which is larger than the actual amount of data we put inside it. + result.shrink_to_fit(); + return result; +} + +template +TS FuzzedDataProvider::ConvertUnsignedToSigned(TU value) { + static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types."); + static_assert(!std::numeric_limits::is_signed, + "Source type must be unsigned."); + + // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream. + if (std::numeric_limits::is_modulo) + return static_cast(value); + + // Avoid using implementation-defined unsigned to signed conversions. + // To learn more, see https://stackoverflow.com/questions/13150449. 
+ if (value <= std::numeric_limits::max()) { + return static_cast(value); + } else { + constexpr auto TS_min = std::numeric_limits::min(); + return TS_min + static_cast(value - TS_min); + } +} + +#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ + diff --git a/src/fuzz/Makefile b/src/fuzz/Makefile new file mode 100644 index 0000000000..a2d2979e79 --- /dev/null +++ b/src/fuzz/Makefile @@ -0,0 +1,134 @@ +CXX := hfuzz-clang++ + +CXXFLAGS := -DHAVE_CONFIG_H -I../../src/ -iquote ../../src/config/ -iquote ../secp256k1/ -iquote ../secp256k1/src/ -iquote ../secp256k1/include/ +CXXFLAGS2 := -DHAVE_CONFIG_H + +LIBS := -lcrypto -lstdc++ -lboost_thread -lboost_filesystem -lboost_program_options -lboost_chrono +LIBS2 := -lstdc++ -lcrypto + +INCLUDE_HEADER := -include ../streams.h -include ../version.h + +BPPLUS_SRCS := libspark/bpplus_fuzz.cpp ../libspark/bpplus.cpp ../libspark/util.cpp fuzzing_utilities.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../crypto/aes.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../util.cpp ../utiltime.cpp ../utilstrencodings.cpp ../random.cpp ../chainparamsbase.cpp +BPPLUS_OUTPUT := libspark/bpplus_hfuzz +BPPLUS_OUTPUT_DEBUG := libspark/bpplus_debug + +BECH32_SRCS := libspark/bech32_fuzz_2.cpp ../libspark/bech32.cpp +BECH32_OUTPUT := libspark/bech32_hfuzz +BECH32_OUTPUT_DEBUG := libspark/bech32_debug + +AEAD_SRCS := libspark/aead_fuzz.cpp ../libspark/aead.cpp ../libspark/util.cpp ../libspark/kdf.cpp ../libspark/hash.cpp ../fuzz/fuzzing_utilities.cpp ../crypto/aes.cpp ../support/lockedpool.cpp ../support/cleanse.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp +AEAD_OUTPUT := libspark/aead_hfuzz +AEAD_OUTPUT_DEBUG := libspark/aead_debug + +GROOTLE_SRCS := libspark/grootle_fuzz.cpp ../libspark/grootle.cpp ../libspark/util.cpp fuzzing_utilities.cpp ../libspark/hash.cpp 
../libspark/kdf.cpp ../libspark/transcript.cpp ../crypto/aes.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../util.cpp ../utiltime.cpp ../utilstrencodings.cpp ../random.cpp ../chainparamsbase.cpp +GROOTLE_OUTPUT := libspark/grootle_hfuzz +GROOTLE_OUTPUT_DEBUG := libspark/grootle_debug + +CHAUM_SRCS := libspark/chaum_fuzz.cpp ../libspark/chaum.cpp ../libspark/transcript.cpp fuzzing_utilities.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp +CHAUM_OUTPUT := libspark/chaum_hfuzz +CHAUM_OUTPUT_DEBUG := libspark/chaum_debug + +SCHNORR_SRCS := libspark/schnorr_fuzz.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../libspark/transcript.cpp ../support/cleanse.cpp +SCHNORR_OUTPUT := libspark/schnorr_hfuzz +SCHNORR_OUTPUT_DEBUG := libspark/schnorr_debug + +COIN_SRCS := libspark/coin_fuzz.cpp ../libspark/coin.cpp ../libspark/params.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/*.cpp ../uint256.cpp ../utilstrencodings.cpp fuzzing_utilities.cpp ../libspark/aead.cpp ../libspark/util.cpp ../libspark/keys.cpp ../libspark/f4grumble.cpp ../libspark/hash.cpp ../libspark/bech32.cpp ../libspark/kdf.cpp +COIN_OUTPUT := libspark/coin_hfuzz +COIN_OUTPUT_DEBUG := libspark/coin_debug + +MINT_TRANSACTION_SRCS := libspark/mint_transaction_fuzz.cpp ../libspark/mint_transaction.cpp ../libspark/coin.cpp ../libspark/keys.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../libspark/util.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../libspark/f4grumble.cpp ../libspark/params.cpp 
../libspark/bech32.cpp ../libspark/aead.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../uint256.cpp ../utilstrencodings.cpp +MINT_TRANSACTION_OUTPUT := libspark/mint_transaction_hfuzz +MINT_TRANSACTION_OUTPUT_DEBUG := libspark/mint_transaction_debug + +SPEND_TRANSACTION_SRCS := libspark/spend_transaction_fuzz.cpp ../libspark/spend_transaction.cpp ../libspark/coin.cpp ../libspark/keys.cpp ../libspark/schnorr.cpp ../fuzz/fuzzing_utilities.cpp ../libspark/util.cpp ../libspark/hash.cpp ../libspark/kdf.cpp ../libspark/transcript.cpp ../libspark/f4grumble.cpp ../libspark/params.cpp ../libspark/bech32.cpp ../libspark/aead.cpp ../libspark/chaum.cpp ../libspark/bpplus.cpp ../libspark/grootle.cpp ../crypto/aes.cpp ../crypto/ripemd160.cpp ../crypto/sha256.cpp ../crypto/chacha20.cpp ../crypto/sha512.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp ../secp256k1/src/cpp/MultiExponent.cpp ../support/cleanse.cpp ../uint256.cpp ../utilstrencodings.cpp ../util.cpp ../utiltime.cpp ../chainparamsbase.cpp ../random.cpp +SPEND_TRANSACTION_OUTPUT := libspark/spend_transaction_hfuzz +SPEND_TRANSACTION_OUTPUT_DEBUG := libspark/spend_transaction_debug + +F4GRUMBLE_SRCS := libspark/f4grumble_fuzz.cpp ../libspark/f4grumble.cpp ../libspark/util.cpp ../libspark/kdf.cpp ../libspark/hash.cpp ../crypto/aes.cpp ../support/lockedpool.cpp ../support/cleanse.cpp ../secp256k1/src/cpp/Scalar.cpp ../secp256k1/src/cpp/GroupElement.cpp +F4GRUMBLE_OUTPUT := libspark/f4grumble_hfuzz +F4GRUMBLE_OUTPUT_DEBUG := libspark/f4grumble_debug + +DEBUG_FLAGS := -g -O0 -ggdb + +bpplus: $(BPPLUS_OUTPUT) +$(BPPLUS_OUTPUT): $(BPPLUS_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +bpplus_debug: $(BPPLUS_OUTPUT_DEBUG) +$(BPPLUS_OUTPUT_DEBUG): $(BPPLUS_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +bech32: $(BECH32_OUTPUT) 
+$(BECH32_OUTPUT): $(BECH32_SRCS) + $(CXX) $(CXXFLAGS2) $^ -o $@ $(LIBS2) + +bech32_debug: $(BECH32_OUTPUT_DEBUG) +$(BECH32_OUTPUT_DEBUG): $(BECH32_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(LIBS2) + +aead: $(AEAD_OUTPUT) +$(AEAD_OUTPUT): $(AEAD_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS2) + +aead_debug: $(AEAD_OUTPUT_DEBUG) +$(AEAD_OUTPUT_DEBUG): $(AEAD_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS2) + +grootle: $(GROOTLE_OUTPUT) +$(GROOTLE_OUTPUT): $(GROOTLE_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +grootle_debug: $(GROOTLE_OUTPUT_DEBUG) +$(GROOTLE_OUTPUT_DEBUG): $(GROOTLE_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +chaum: $(CHAUM_OUTPUT) +$(CHAUM_OUTPUT): $(CHAUM_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +chaum_debug: $(CHAUM_OUTPUT_DEBUG) +$(CHAUM_OUTPUT_DEBUG): $(CHAUM_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +schnorr: $(SCHNORR_OUTPUT) +$(SCHNORR_OUTPUT): $(SCHNORR_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +schnorr_debug: $(SCHNORR_OUTPUT_DEBUG) +$(SCHNORR_OUTPUT_DEBUG): $(SCHNORR_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(INCLUDE_HEADER) $(LIBS) + +coin: $(COIN_OUTPUT) +$(COIN_OUTPUT): $(COIN_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +coin_debug: $(COIN_OUTPUT_DEBUG) +$(COIN_OUTPUT_DEBUG): $(COIN_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +mint_transaction: $(MINT_TRANSACTION_OUTPUT) +$(MINT_TRANSACTION_OUTPUT): $(MINT_TRANSACTION_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS2) + +mint_transaction_debug: $(MINT_TRANSACTION_OUTPUT_DEBUG) +$(MINT_TRANSACTION_OUTPUT_DEBUG): $(MINT_TRANSACTION_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS2) $^ -o $@ $(LIBS2) + +spend_transaction: $(SPEND_TRANSACTION_OUTPUT) +$(SPEND_TRANSACTION_OUTPUT): $(SPEND_TRANSACTION_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +spend_transaction_debug: $(SPEND_TRANSACTION_OUTPUT_DEBUG) +$(SPEND_TRANSACTION_OUTPUT_DEBUG): 
$(SPEND_TRANSACTION_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +f4grumble: $(F4GRUMBLE_OUTPUT) +$(F4GRUMBLE_OUTPUT): $(F4GRUMBLE_SRCS) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) + +f4grumble_debug: $(F4GRUMBLE_OUTPUT_DEBUG) +$(F4GRUMBLE_OUTPUT_DEBUG): $(F4GRUMBLE_SRCS) + $(CXX) $(DEBUG_FLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS) + +clean: + rm -f $(BPPLUS_OUTPUT) $(BPPLUS_OUTPUT_DEBUG) $(BECH32_OUTPUT) $(BECH32_OUTPUT_DEBUG) $(AEAD_OUTPUT) $(AEAD_OUTPUT_DEBUG) $(GROOTLE_OUTPUT) $(GROOTLE_OUTPUT_DEBUG) $(CHAUM_OUTPUT) $(CHAUM_OUTPUT_DEBUG) $(SCHNORR_OUTPUT) $(SCHNORR_OUTPUT_DEBUG) $(COIN_OUTPUT) $(COIN_OUTPUT_DEBUG) $(MINT_TRANSACTION_OUTPUT) $(MINT_TRANSACTION_OUTPUT_DEBUG) $(SPEND_TRANSACTION_OUTPUT) $(SPEND_TRANSACTION_OUTPUT_DEBUG) *.o diff --git a/src/fuzz/README.md b/src/fuzz/README.md new file mode 100644 index 0000000000..e3c68835e9 --- /dev/null +++ b/src/fuzz/README.md @@ -0,0 +1,142 @@ +# Fuzzing libspark + +## Quickstart Guide +To quickly get started fuzzing libspark using honggfuzz: + +### Build firo +- clone this repo: +``` +git clone -b spark https://github.com/firoorg/firo.git +``` +- Build firo: Follow instruction from https://github.com/firoorg/firo/tree/spark#readme + +Once the build is successful, we have to install honggfuzz and required dependencies. 
+ +### Installing fuzzer and Dependencies +- Install honggfuzz (https://github.com/google/honggfuzz) +``` +sudo apt-get install binutils-dev libunwind-dev libblocksruntime-dev clang +git clone https://github.com/google/honggfuzz.git +cd honggfuzz +make +sudo make install +``` +For more information you can look at https://github.com/google/honggfuzz/blob/master/docs/USAGE.md + +You might also need to install the following boost and ssl dependencies in order to compile the fuzzing harness: + +``` +sudo apt install libboost-dev +sudo apt install libssl-dev +sudo apt install libstdc++-12-dev +sudo apt install libboost-filesystem-dev +sudo apt install libboost-thread-dev +sudo apt install libboost-program-options-dev +sudo apt install libboost-chrono-dev +``` + +### Fuzzing using honggfuzz +* In order to fuzz `firo/src/libpark` using Honggfuzz: + +``` +cd firo/src/fuzz/ +export CC=hfuzz-clang +export CXX=hfuzz-clang++ +``` + +To compile with `hfuzz-clang++`, inside src/fuzz run: + +``` +make +``` + +For example(for bpplus): +``` +make bpplus +``` +The above command will generate an instrumented binary with name `_hfuzz` (eg: bpplus_hfuzz) inside src/fuzz/libspark. + +The fuzzing harness of the following spark files is availabe: aead, bech32, bpplus, chaum, coin, f4grumble, grootle, mint_transaction, schnorr and spend_transaction. + +* To start fuzzing: + +1. create directories for input corpora and for saving all the crashes +``` +mkdir input crashes +``` +2. Inside the crashes directory run: +``` +honggfuzz -i input -- ./libspark/_hfuzz ___FILE___ +``` + +example: +1. `mkdir input crashes` +2. `cd crashes` +2. `honggfuzz -i ../input -- ./../libspark/bpplus_hfuzz ___FILE___` +3. To stop press `ctrl+c` + +Here we are providing an empty corpora. In case of an already available corpora, we can provide the availabe corpora. +The flag `-i` is for the input folder which we are providing `./../_hfuzz>` is the target binary which we want to fuzz. 
+ +### Analyzing the crashes + +If there is a crash, the reason for the crash can be found in HONGGFUZZ.REPORT.TXT or simply by running +``` +./libspark/ +``` + +Example: +``` +./libspark/bpplus_hfuzz SIGABRT.PC.7ffff7a8400b.STACK.1b5b5f0067.CODE.-6.ADDR.0.INSTR.mov____0x108(%rsp),%rax +``` + +To debug or to do the rootcause analysis, gdb debugger can be used. to debug using gdb debugger: + +1. First compile the harness using gdb flags `-g -O0 -ggdb`. To compile using gdb debugger, inside `src/fuzz` run: +``` +make _debug +``` +Example: +``` +make bpplus_debug +``` + +2. start the debugger by running: +``` +gdb --args +``` +Example: +``` +gdb --args bpplus_debug SIGABRT.PC.7ffff7a8400b.STACK.1b5b5f0067.CODE.-6.ADDR.0.INSTR.mov____0x108(%rsp),%rax +``` +This will start the debugger. + +3. You can do heap analysis by running `heap-analysis` inside the debugger and/or `bt` for backtracing. + + +### Generating a Coverage Report using kcov +* Install kcov (https://github.com/SimonKagstrom/kcov/tree/master) +``` +sudo apt-get install binutils-dev libssl-dev libcurl4-openssl-dev zlib1g-dev libdw-dev libiberty-dev +git clone https://github.com/SimonKagstrom/kcov.git +cd /path/to/kcov/source/dir +mkdir build +cd build +cmake .. +make +sudo make install +``` +Once successfully installed, follow the below instructions to generate the code-coverage + +1. First compile the harness with gdb flag. run `make _debug` inside src/fuzz to compile using gdb debugger. +2. take the input_folder as the input corpora from fuzzing or one can also create it by running: `honggfuzz -i -– ./_hfuzz ___FILE___ @@`. This will start the fuzzer. Kill it by `ctrl+C`. The fuzzer will generate some random inputs inside the input_folder. Since kcov will generate coverage for each input inside the input_folder, it's preffered to have only a few inputs, otherwise it will take a long time to generate the entire coverage. + +3. 
Inside the `generate_coverage.sh` replace the input_folder, output_folder and fuzz_exe by your input corpus, coverage output folder and harness binary. +4. Run `./generate_coverage.sh`. This will generate a merged output for all the inputs present in the input_folder. +5. To view the result run `firefox ./merged-output/index.html`. + +6. Alternatively, or if you are on a VM, go inside the coverage output folder and then merged-output +7. run `python3 -m http.server`. This will start an HTTP server at http://0.0.0.0:8000/ +8. open your browser and paste http://0.0.0.0:8000/ to see the result. + +NOTE: to view the coverage for every dependent file, `generate_coverage.sh` should be in the root folder. Also, you should either free the previous port or start the server on a new port by running `python3 -m http.server <port>` for different files. \ No newline at end of file diff --git a/src/fuzz/fuzzing_utilities.cpp b/src/fuzz/fuzzing_utilities.cpp new file mode 100644 index 0000000000..af9b1f2c65 --- /dev/null +++ b/src/fuzz/fuzzing_utilities.cpp @@ -0,0 +1,89 @@ +#include "fuzzing_utilities.h" + +FuzzedSecp256k1Object::FuzzedSecp256k1Object(FuzzedDataProvider *fdp) { + this->fdp = fdp; +} + +secp_primitives::GroupElement FuzzedSecp256k1Object::GetGroupElement() { + char* x = (char *)this->fdp->ConsumeBytes(256).data(); + char* y = (char *)this->fdp->ConsumeBytes(256).data(); + secp_primitives::GroupElement ge = secp_primitives::GroupElement(x, y); + + return ge; +} + +secp_primitives::Scalar FuzzedSecp256k1Object::GetScalar() { + uint64_t value = this->fdp->ConsumeIntegral(); + secp_primitives::Scalar s = secp_primitives::Scalar(value); + + return s; +} + +secp_primitives::GroupElement FuzzedSecp256k1Object::GetMemberGroupElement() { + secp_primitives::GroupElement ge; + ge.randomize(); + return ge; +} + +std::vector FuzzedSecp256k1Object::GetMemberGroupElements(size_t len) { + std::vector ge_vec; + ge_vec.resize(len); + for (size_t i = 0; i < len; i++) { + ge_vec[i] = 
(GetMemberGroupElement()); + } + return ge_vec; +} + +std::vector FuzzedSecp256k1Object::GetRandomGroupVector(size_t len) { + std::vector result; + result.resize(len); + for (size_t i = 0; i < len; i++) { + result[i].randomize(); + } + return result; +} + +std::vector FuzzedSecp256k1Object::GetGroupElements(int len) { + std::vector ge_vec; + ge_vec.reserve(len); + for (int i = 0; i < len; i++) { + ge_vec.push_back(GetGroupElement()); + } + + return ge_vec; +} + +std::vector FuzzedSecp256k1Object::GetScalars(size_t len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar()); + } + + return scalar_vec; +} + +std::vector FuzzedSecp256k1Object::GetScalarsVector(size_t len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar()); + } + + return scalar_vec; +} + +secp_primitives::Scalar FuzzedSecp256k1Object::GetScalar_modified() { + secp_primitives::Scalar s = secp_primitives::Scalar(this->fdp->ConsumeBytes(256).data()); + return s; +} + +std::vector FuzzedSecp256k1Object::GetScalars_modified(int len) { + std::vector scalar_vec; + scalar_vec.reserve(len); + for (int i = 0; i < len; i++) { + scalar_vec.push_back(GetScalar_modified()); + } + + return scalar_vec; +} \ No newline at end of file diff --git a/src/fuzz/fuzzing_utilities.h b/src/fuzz/fuzzing_utilities.h new file mode 100644 index 0000000000..360d27c7e3 --- /dev/null +++ b/src/fuzz/fuzzing_utilities.h @@ -0,0 +1,23 @@ +#include "FuzzedDataProvider.h" +#include "../secp256k1/include/Scalar.h" +#include "../secp256k1/include/GroupElement.h" + +class FuzzedSecp256k1Object { + public: + FuzzedSecp256k1Object(FuzzedDataProvider *fdp); + + FuzzedDataProvider *fdp; + + secp_primitives::GroupElement GetGroupElement(); + secp_primitives::Scalar GetScalar(); + secp_primitives::GroupElement GetMemberGroupElement(); + secp_primitives::Scalar GetScalar_modified(); + + std::vector 
GetGroupElements(int len); + std::vector GetScalars(size_t len); + std::vector GetMemberGroupElements(size_t len); + std::vector GetRandomGroupVector(size_t len); + std::vector GetScalars_modified(int len); + std::vector GetScalarsVector(size_t len); + +}; \ No newline at end of file diff --git a/src/fuzz/generate_coverage.sh b/src/fuzz/generate_coverage.sh new file mode 100755 index 0000000000..bd91f0aaa6 --- /dev/null +++ b/src/fuzz/generate_coverage.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +input_folder="../../src/fuzz/inputs/bpplus_inputs" +output_folder="../../src/fuzz/coverage_result/bpplus_coverage" +fuzz_exe="../../src/fuzz/libspark/bpplus_debug" + +mkdir $output_folder + +number_of_files=$(ls $input_folder | wc | awk '{print $1}') +echo "Number of input files to test: $number_of_files" + +count=0 + +for i in $(ls $input_folder); +do + kcov --include-path=. ./$output_folder/input_$count ./$fuzz_exe --stdout -d ./$input_folder/$i > /dev/null; + ((count++)); + echo "[++] Count of files processed: $count"; +done + +kcov --merge ./$output_folder/merged-output ./$output_folder/input_* \ No newline at end of file diff --git a/src/fuzz/libspark/aead_fuzz.cpp b/src/fuzz/libspark/aead_fuzz.cpp new file mode 100644 index 0000000000..b1b087dafb --- /dev/null +++ b/src/fuzz/libspark/aead_fuzz.cpp @@ -0,0 +1,24 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/aead.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + secp_primitives::GroupElement ge = fsp.GetGroupElement(); + std::string additional_data = fdp.ConsumeBytesAsString(len); + int fuzzed_message = fdp.ConsumeIntegral(); + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << fuzzed_message; + + spark::AEADEncryptedData aed = spark::AEAD::encrypt(ge, additional_data, ser); + ser = spark::AEAD::decrypt_and_verify(ge, additional_data, aed); + int 
received_fuzzed_message; + ser >> received_fuzzed_message; + assert(fuzzed_message == received_fuzzed_message); + + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/aead_fuzz_random_key.cpp b/src/fuzz/libspark/aead_fuzz_random_key.cpp new file mode 100644 index 0000000000..631f027dd7 --- /dev/null +++ b/src/fuzz/libspark/aead_fuzz_random_key.cpp @@ -0,0 +1,24 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/aead.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + secp_primitives::GroupElement ge = fsp.GetMemberGroupElement(); + std::string additional_data = fdp.ConsumeBytesAsString(len); + int fuzzed_message = fdp.ConsumeIntegral(); + CDataStream ser(SER_NETWORK, PROTOCOL_VERSION); + ser << fuzzed_message; + + spark::AEADEncryptedData aed = spark::AEAD::encrypt(ge, additional_data, ser); + ser = spark::AEAD::decrypt_and_verify(ge, additional_data, aed); + int received_fuzzed_message; + ser >> received_fuzzed_message; + assert(fuzzed_message == received_fuzzed_message); + + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/bech32_fuzz.cpp b/src/fuzz/libspark/bech32_fuzz.cpp new file mode 100644 index 0000000000..192b23b27d --- /dev/null +++ b/src/fuzz/libspark/bech32_fuzz.cpp @@ -0,0 +1,46 @@ +#include "../../libspark/bech32.h" +#include "../FuzzedDataProvider.h" +#include +#include + +enum class Bech32EncodingForFuzzing { + INVALID, + BECH32, + BECH32M, + kMaxValue = BECH32M +}; + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fuzzed_data(buf, len); + + std::string test_string = fuzzed_data.ConsumeBytesAsString(len); + std::vector test_vec = fuzzed_data.ConsumeBytes(len); + Bech32EncodingForFuzzing test_encoding_helper = fuzzed_data.ConsumeEnum(); + bech32::Encoding test_encoding; + switch (test_encoding_helper) { + case 
Bech32EncodingForFuzzing::INVALID: + test_encoding = bech32::Encoding::INVALID; + break; + case Bech32EncodingForFuzzing::BECH32: + test_encoding = bech32::Encoding::BECH32; + break; + case Bech32EncodingForFuzzing::BECH32M: + test_encoding = bech32::Encoding::BECH32M; + break; + } + std::string test_string_res; + test_string_res = bech32::encode(test_string, test_vec, test_encoding); + bech32::DecodeResult dr; + dr = bech32::decode(test_string_res); + assert(dr.hrp == test_string); + assert(dr.encoding == test_encoding); + assert(dr.data == test_vec); + + std::vector test_vec1 = fuzzed_data.ConsumeBytes(len); + std::vector test_vec2 = fuzzed_data.ConsumeBytes(len); + int test_frombits = fuzzed_data.ConsumeIntegral(); + int test_to_bits = fuzzed_data.ConsumeIntegral(); + bool test_pad = fuzzed_data.ConsumeBool(); + bech32::convertbits(test_vec1, test_vec2, test_frombits, test_to_bits, test_pad); + return 0; +} diff --git a/src/fuzz/libspark/bech32_fuzz_2.cpp b/src/fuzz/libspark/bech32_fuzz_2.cpp new file mode 100644 index 0000000000..bf71d3dd58 --- /dev/null +++ b/src/fuzz/libspark/bech32_fuzz_2.cpp @@ -0,0 +1,62 @@ +#include "../../libspark/bech32.h" +#include "../FuzzedDataProvider.h" +#include +#include +#include + +// enum class Bech32EncodingForFuzzing { +// INVALID, +// BECH32, +// BECH32M, +// kMaxValue = BECH32M +// }; + +bool CaseInsensitiveEqual(const std::string& s1, const std::string& s2) +{ + if (s1.size() != s2.size()) return false; + for (size_t i = 0; i < s1.size(); ++i) { + char c1 = s1[i]; + if (c1 >= 'A' && c1 <= 'Z') c1 -= ('A' - 'a'); + char c2 = s2[i]; + if (c2 >= 'A' && c2 <= 'Z') c2 -= ('A' - 'a'); + if (c1 != c2) return false; + } + return true; +} + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fuzzed_data(buf, len); + + std::string test_string = fuzzed_data.ConsumeBytesAsString(len); + + const auto r1 = bech32::decode(test_string); + if(r1.hrp.empty()) { + assert(r1.encoding == 
bech32::Encoding::INVALID); + assert(r1.data.empty()); + } else { + assert(r1.encoding != bech32::Encoding::INVALID); + const std::string reencoded = bech32::encode(r1.hrp, r1.data, r1.encoding); + assert(CaseInsensitiveEqual(test_string, reencoded)); + } + + std::vector input = fuzzed_data.ConsumeBytes(len); + std::vector test_vec2 = fuzzed_data.ConsumeBytes(len); + int test_frombits = fuzzed_data.ConsumeIntegral(); + int test_to_bits = fuzzed_data.ConsumeIntegral(); + bool test_pad = fuzzed_data.ConsumeBool(); + bech32::convertbits(input, test_vec2, test_frombits, test_to_bits, test_pad); + + if(input.size() + 3 + 6 <= 90) { + for (auto encoding: {bech32::Encoding::BECH32, bech32::Encoding::BECH32M}) { + const std::string encoded = bech32::encode("bc", input, encoding ); + assert(!encoded.empty()); + + const auto r2 = bech32::decode(encoded); + assert(r2.encoding == encoding); + assert(r2.hrp == "bc"); + assert(r2.data == input); + } + } + + return 0; +} diff --git a/src/fuzz/libspark/bpplus_fuzz.cpp b/src/fuzz/libspark/bpplus_fuzz.cpp new file mode 100644 index 0000000000..82f5504572 --- /dev/null +++ b/src/fuzz/libspark/bpplus_fuzz.cpp @@ -0,0 +1,112 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/bpplus.h" +#include "../../libspark/bpplus_proof.h" +#include + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Single Proof **/ + size_t N0 = fdp.ConsumeIntegralInRange(0,64); + size_t M0 = fdp.ConsumeIntegral(); + + N0 = 64; + M0 = 4; + // Generators + GroupElement G0, H0; + G0.randomize(); + H0.randomize(); + + std::vector Gi0, Hi0; + size_t generators_needed = N0*M0; + if (!spark::is_nonzero_power_of_2(generators_needed)) { + generators_needed = 1 << (spark::log2(N0*M0) + 1); + } + + Gi0.resize(generators_needed); + Hi0.resize(generators_needed); + for (size_t i=0; i < generators_needed; i++) { + 
Gi0[i].randomize(); + Hi0[i].randomize(); + } + + // Commitments + std::vector v, r; + v.resize(M0); + r.resize(M0); + // v = fsp.GetScalars(M0); + // r = fsp.GetScalars(M0); + for(int i = 0; i < M0; i++){ + v[i] = Scalar((uint64_t) rand()); + r[i].randomize(); + } + + std::vector C0; + C0.resize(M0); + for (size_t i=0; i < M0; i++) { + C0[i] = G0*v[i] + H0*r[i]; + } + + spark::BPPlus bpplus0(G0, H0, Gi0, Hi0, N0); + spark::BPPlusProof proof0; + bpplus0.prove(v, r, C0, proof0); + assert(bpplus0.verify(C0, proof0)); + /** End of Single proof fuzz test**/ + + /** Batch Proof **/ + + size_t N1 = fdp.ConsumeIntegralInRange(1,64); + size_t B = fdp.ConsumeIntegral(); + N1 = 64; + B = 5; + + std::vector sizes; + sizes.resize(B); + for(int i = 0; i < B; i++){ + sizes[i] = (fdp.ConsumeIntegral() % 8) + 1 ; // otherwise it's "Bad BPPlus statement!4" line 102 bpplus.cpp since B = 5.(checked) + } + // sizes = fdp.ConsumeRemainingBytes(); + + // Generators + GroupElement G1, H1; + G1.randomize(); + H1.randomize(); + + // std::size_t next_power = 1 << (uint(log2(B)) + 1); + std::vector Gi1, Hi1; + Gi1.resize(8*N1); + Hi1.resize(8*N1); + for (size_t i=0; i < 8*N1; i++) { + Hi1[i].randomize(); + Gi1[i].randomize(); + } + + spark::BPPlus bpplus1(G1, H1, Gi1, Hi1, N1); + std::vector proofs; + proofs.resize(B); + std::vector> C1; + + for (size_t i=0; i < B; i++) { + std::size_t M = sizes[i]; + std::vector v, r; + v.resize(M); + r.resize(M); + std::vector C_; + C_.resize(M); + for (size_t j=0; j < M; j++) { + v[j] = Scalar(uint64_t(j)); + r[j].randomize(); + C_[j] = G1*v[j] + H1*r[j]; + } + C1.emplace_back(C_); + bpplus1.prove(v, r, C_, proofs[i]); + } + assert(bpplus1.verify(C1, proofs)); + + /** End of Batch proof fuzz test **/ + + return 0; +} diff --git a/src/fuzz/libspark/chaum_fuzz.cpp b/src/fuzz/libspark/chaum_fuzz.cpp new file mode 100644 index 0000000000..e25a9a8b00 --- /dev/null +++ b/src/fuzz/libspark/chaum_fuzz.cpp @@ -0,0 +1,229 @@ +#include "../fuzzing_utilities.h" 
+#include "../FuzzedDataProvider.h" +#include "../../libspark/chaum_proof.h" +#include "../../libspark/chaum.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Serialization tests **/ + GroupElement F0, G0, H0, U0; + F0.randomize(); + G0.randomize(); + H0.randomize(); + U0.randomize(); + + const std::size_t n = fdp.ConsumeIntegral(); + + Scalar mu0; + mu0.randomize(); + std::vector x0, y0, z0; + x0.resize(n); + y0.resize(n); + z0.resize(n); + std::vector S0, T0; + S0.resize(n); + T0.resize(n); + for (size_t i=0; i < n; i++) { + x0[i].randomize(); + y0[i].randomize(); + z0[i].randomize(); + + S0[i] = F0*x0[i] + G0*y0[i] + H0*z0[i]; + T0[i] = (U0 + G0*y0[i].negate())*x0[i].inverse(); + } + + spark::ChaumProof proof0; + + spark::Chaum chaum0(F0, G0, H0, U0); + chaum0.prove(mu0, x0, y0, z0, S0, T0, proof0); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof0; + + spark::ChaumProof deserialized_proof0; + serialized >> deserialized_proof0; + + assert(proof0.A1 == deserialized_proof0.A1); + assert(proof0.t2 == deserialized_proof0.t2); + assert(proof0.t3 == deserialized_proof0.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof0.A2[i] == deserialized_proof0.A2[i]); + assert(proof0.t1[i] == deserialized_proof0.t1[i]); + } + + /** Now fuzz all the things **/ + + GroupElement F1, G1, H1, U1; + F1 = fsp.GetMemberGroupElement(); + G1 = fsp.GetMemberGroupElement(); + H1 = fsp.GetMemberGroupElement(); + U1 = fsp.GetMemberGroupElement(); + //F1.randomize(); + //G1.randomize(); + //H1.randomize(); + //U1.randomize(); + + Scalar mu1; + mu1 = fsp.GetScalar(); + std::vector x1, y1, z1; + x1.resize(n); + x1 = fsp.GetScalars(n); + y1.resize(n); + y1 = fsp.GetScalars(n); + z1.resize(n); + z1 = fsp.GetScalars(n); + + std::vector S1, T1; + S1.resize(n); + T1.resize(n); + for (size_t i=0; i < n; i++) { + S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i]; + 
T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse(); + } + + spark::ChaumProof proof1; + + spark::Chaum chaum1(F1, G1, H1, U1); + chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1); + + serialized << proof1; + + spark::ChaumProof deserialized_proof1; + serialized >> deserialized_proof1; + + assert(proof1.A1 == deserialized_proof1.A1); + assert(proof1.t2 == deserialized_proof1.t2); + assert(proof1.t3 == deserialized_proof1.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof1.A2[i] == deserialized_proof1.A2[i]); + assert(proof1.t1[i] == deserialized_proof1.t1[i]); + } + /**End of serialization tests**/ + + /** Completeness tests **/ + + GroupElement F2, G2, H2, U2; + F2.randomize(); + G2.randomize(); + H2.randomize(); + U2.randomize(); + + Scalar mu2; + mu2.randomize(); + std::vector x2, y2, z2; + x2.resize(n); + y2.resize(n); + z2.resize(n); + std::vector S2, T2; + S2.resize(n); + T2.resize(n); + for (size_t i=0; i < n; i++) { + x2[i].randomize(); + y2[i].randomize(); + z2[i].randomize(); + + S2[i] = F2*x2[i] + G2*y2[i] + H2*z2[i]; + T2[i] = (U2 + G2*y2[i].negate())*x2[i].inverse(); + } + + spark::ChaumProof proof2; + + spark::Chaum chaum2(F2, G2, H2, U2); + chaum2.prove(mu2, x2, y2, z2, S2, T2, proof2); + assert(chaum2.verify(mu2, S2, T2, proof2)); + + /** Full all the things again**/ + + GroupElement F3, G3, H3, U3; + F3 = fsp.GetMemberGroupElement(); + G3 = fsp.GetMemberGroupElement(); + H3 = fsp.GetMemberGroupElement(); + U3 = fsp.GetMemberGroupElement(); + //F3.randomize(); + //G3.randomize(); + //H3.randomize(); + //U3.randomize(); + + + Scalar mu3; + mu3 = fsp.GetScalar(); + std::vector x3, y3, z3; + x3.resize(n); + x3 = fsp.GetScalars(n); + y3.resize(n); + y3 = fsp.GetScalars(n); + z3.resize(n); + z3 = fsp.GetScalars(n); + + std::vector S3, T3; + S3.resize(n); + T3.resize(n); + for (size_t i=0; i < n; i++) { + S3[i] = F3*x3[i] + G3*y3[i] + H3*z3[i]; + T3[i] = (U3 + G3*y3[i].negate())*x3[i].inverse(); + } + + spark::ChaumProof proof3; + + spark::Chaum chaum3(F3, 
G3, H3, U3); + chaum3.prove(mu3, x3, y3, z3, S3, T3, proof3); + assert(chaum3.verify(mu3, S3, T3, proof3)); + + /** End of completeness tests**/ + + /* Fuzzing for bad proofs*/ + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + assert(!(chaum3.verify(evil_mu, S3, T3, proof3))); + + // Bad S + for (std::size_t i = 0; i < n; i++) { + std::vector evil_S(S3); + evil_S[i].randomize(); + assert(!(chaum3.verify(mu3, evil_S, T3, proof3))); + } + + // Bad T + for (std::size_t i = 0; i < n; i++) { + std::vector evil_T(T3); + evil_T[i].randomize(); + assert(!(chaum3.verify(mu3, S3, evil_T, proof3))); + } + + // Bad A1 + spark::ChaumProof evil_proof = proof3; + evil_proof.A1.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad A2 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.A2[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t1 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.t1[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t2 + evil_proof = proof3; + evil_proof.t2.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad t3 + evil_proof = proof3; + evil_proof.t3.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/chaum_fuzz_2.cpp b/src/fuzz/libspark/chaum_fuzz_2.cpp new file mode 100644 index 0000000000..9015c93b76 --- /dev/null +++ b/src/fuzz/libspark/chaum_fuzz_2.cpp @@ -0,0 +1,145 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/chaum_proof.h" +#include "../../libspark/chaum.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + + if (len == 0) { + return 0; + } + + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + GroupElement F1, G1, H1, U1; + std::vector ge = fsp.GetGroupElements(4); + + F1 = ge[0]; + 
G1 = ge[1]; + H1 = ge[2]; + U1 = ge[3]; + + const std::size_t n = fdp.ConsumeIntegral(); + + Scalar mu1; + mu1 = fsp.GetScalar(); + std::vector x1, y1, z1; + x1.resize(n); + x1 = fsp.GetScalars(n); + y1.resize(n); + y1 = fsp.GetScalars(n); + z1.resize(n); + z1 = fsp.GetScalars(n); + + std::vector S1, T1; + S1.resize(n); + T1.resize(n); + for (size_t i=0; i < n; i++) { + S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i]; + T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse(); + } + + spark::ChaumProof proof1; + + spark::Chaum chaum1(F1, G1, H1, U1); + chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof1; + + spark::ChaumProof deserialized_proof1; + serialized >> deserialized_proof1; + + assert(proof1.A1 == deserialized_proof1.A1); + assert(proof1.t2 == deserialized_proof1.t2); + assert(proof1.t3 == deserialized_proof1.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof1.A2[i] == deserialized_proof1.A2[i]); + assert(proof1.t1[i] == deserialized_proof1.t1[i]); + } + + GroupElement F3, G3, H3, U3; + F3 = fsp.GetGroupElement(); + G3 = fsp.GetGroupElement(); + H3 = fsp.GetGroupElement(); + U3 = fsp.GetGroupElement(); + + Scalar mu3; + mu3 = fsp.GetScalar(); + std::vector x3, y3, z3; + x3.resize(n); + x3 = fsp.GetScalars(n); + y3.resize(n); + y3 = fsp.GetScalars(n); + z3.resize(n); + z3 = fsp.GetScalars(n); + + std::vector S3, T3; + S3.resize(n); + T3.resize(n); + for (size_t i=0; i < n; i++) { + S3[i] = F3*x3[i] + G3*y3[i] + H3*z3[i]; + T3[i] = (U3 + G3*y3[i].negate())*x3[i].inverse(); + } + + spark::ChaumProof proof3; + + spark::Chaum chaum3(F3, G3, H3, U3); + chaum3.prove(mu3, x3, y3, z3, S3, T3, proof3); + assert(chaum3.verify(mu3, S3, T3, proof3)); + + /* Fuzzing for bad proofs*/ + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + assert(!(chaum3.verify(evil_mu, S3, T3, proof3))); + + // Bad S + for (std::size_t i = 0; i < n; i++) { + std::vector evil_S(S3); + evil_S[i].randomize(); + 
assert(!(chaum3.verify(mu3, evil_S, T3, proof3))); + } + + // Bad T + for (std::size_t i = 0; i < n; i++) { + std::vector evil_T(T3); + evil_T[i].randomize(); + assert(!(chaum3.verify(mu3, S3, evil_T, proof3))); + } + + // Bad A1 + spark::ChaumProof evil_proof = proof3; + evil_proof.A1.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad A2 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.A2[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t1 + for (std::size_t i = 0; i < n; i++) { + evil_proof = proof3; + evil_proof.t1[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t2 + evil_proof = proof3; + evil_proof.t2.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad t3 + evil_proof = proof3; + evil_proof.t3.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/chaum_fuzz_member.cpp b/src/fuzz/libspark/chaum_fuzz_member.cpp new file mode 100644 index 0000000000..f8625545e3 --- /dev/null +++ b/src/fuzz/libspark/chaum_fuzz_member.cpp @@ -0,0 +1,143 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/chaum_proof.h" +#include "../../libspark/chaum.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Serialization tests **/ + GroupElement F0, G0, H0, U0; + F0.randomize(); + G0.randomize(); + H0.randomize(); + U0.randomize(); + + const std::size_t n = fdp.ConsumeIntegralInRange(1, INT_MAX); + + Scalar mu0; + mu0.randomize(); + std::vector x0, y0, z0; + x0.resize(n); + y0.resize(n); + z0.resize(n); + std::vector S0, T0; + S0.resize(n); + T0.resize(n); + for (size_t i=0; i < n; i++) { + x0[i].randomize(); + y0[i].randomize(); + z0[i].randomize(); + + S0[i] = F0*x0[i] + G0*y0[i] + H0*z0[i]; + 
T0[i] = (U0 + G0*y0[i].negate())*x0[i].inverse(); + } + + spark::ChaumProof proof0; + + spark::Chaum chaum0(F0, G0, H0, U0); + chaum0.prove(mu0, x0, y0, z0, S0, T0, proof0); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof0; + + spark::ChaumProof deserialized_proof0; + serialized >> deserialized_proof0; + + assert(proof0.A1 == deserialized_proof0.A1); + assert(proof0.t2 == deserialized_proof0.t2); + assert(proof0.t3 == deserialized_proof0.t3); + for (size_t i = 0 ; i < n; i++) { + assert(proof0.A2[i] == deserialized_proof0.A2[i]); + assert(proof0.t1[i] == deserialized_proof0.t1[i]); + } + + // fuzz completeness + GroupElement F1, G1, H1, U1; + F1.randomize(); + G1.randomize(); + H1.randomize(); + U1.randomize(); + + const std::size_t n1 = fdp.ConsumeIntegralInRange(1, INT_MAX); + + Scalar mu1; + mu1.randomize(); + std::vector x1, y1, z1; + x1.resize(n1); + y1.resize(n1); + z1.resize(n1); + std::vector S1, T1; + S1.resize(n1); + T1.resize(n1); + for (std::size_t i = 0; i < n; i++) { + x1[i].randomize(); + y1[i].randomize(); + z1[i].randomize(); + + S1[i] = F1*x1[i] + G1*y1[i] + H1*z1[i]; + T1[i] = (U1 + G1*y1[i].negate())*x1[i].inverse(); + } + + spark::ChaumProof proof1; + spark::Chaum chaum1(F1, G1, H1, U1); + chaum1.prove(mu1, x1, y1, z1, S1, T1, proof1); + + assert(chaum1.verify(mu1, S1, T1, proof1)); + /** End of completeness tests**/ + + /* Fuzzing for bad proofs*/ + + // Bad mu + Scalar evil_mu; + evil_mu.randomize(); + assert(!(chaum1.verify(evil_mu, S1, T1, proof1))); + + // Bad S + for (std::size_t i = 0; i < n1; i++) { + std::vector evil_S(S1); + evil_S[i].randomize(); + assert(!(chaum1.verify(m1, evil_S, T1, proof1))); + } + + // Bad T + for (std::size_t i = 0; i < n1; i++) { + std::vector evil_T(T1); + evil_T[i].randomize(); + assert(!(chaum1.verify(mu1, S1, evil_T, proof1))); + } + + // Bad A1 + spark::ChaumProof evil_proof = proof1; + evil_proof.A1.randomize(); + assert(!(chaum1.verify(mu1, S1, T1, evil_proof))); + + // 
Bad A2 + for (std::size_t i = 0; i < n1; i++) { + evil_proof = proof1; + evil_proof.A2[i].randomize(); + assert(!(chaum1.verify(mu1, S1, T1, evil_proof))); + } + + // Bad t1 + for (std::size_t i = 0; i < n1; i++) { + evil_proof = proof1; + evil_proof.t1[i].randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + } + + // Bad t2 + evil_proof = proof3; + evil_proof.t2.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + // Bad t3 + evil_proof = proof3; + evil_proof.t3.randomize(); + assert(!(chaum3.verify(mu3, S3, T3, evil_proof))); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/coin_fuzz.cpp b/src/fuzz/libspark/coin_fuzz.cpp new file mode 100644 index 0000000000..79b0a6f0b4 --- /dev/null +++ b/src/fuzz/libspark/coin_fuzz.cpp @@ -0,0 +1,72 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/coin.h" +// #include "../../test/test_bitcoin.h" + +#include + +const std::size_t SCALAR_ENCODING = 32; +const char COIN_TYPE_MINT = 0; +const char COIN_TYPE_SPEND = 1; + + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + // Scalar temp = fsp.GetScalar(); + Scalar temp; + temp.randomize(); + + std::vector result; + result.resize(SCALAR_ENCODING); + temp.serialize(result.data()); + + const spark::Params* params; + params = spark::Params::get_default(); + + const uint64_t i = len; + + // it will be better to choose s different way to generate the value + const uint64_t v = std::rand(); + const std::string memo = fdp.ConsumeBytesAsString(len); + + // Generate keys + spark::SpendKey spend_key(params); + spark::FullViewKey full_view_key(spend_key); + spark::IncomingViewKey incoming_view_key(full_view_key); + + // Generate address + spark::Address address(incoming_view_key, i); + + // Generate coin + // Scalar k = fsp.GetScalar(); + Scalar k; + k.randomize(); + + spark::Coin coin = 
spark::Coin ( + params, + COIN_TYPE_MINT, + k, + address, + v, + memo, + result + ); + + // Identify coin + spark::IdentifiedCoinData i_data = coin.identify(incoming_view_key); + assert(i_data.i == i); + assert(i_data.d == address.get_d()); + assert(i_data.v == v); + assert(i_data.memo == memo); + + // Recover coin + spark::RecoveredCoinData r_data = coin.recover(full_view_key, i_data); + assert(params->get_F()*(spark::SparkUtils::hash_ser(k, coin.serial_context) + spark::SparkUtils::hash_Q2(incoming_view_key.get_s1(), i) + full_view_key.get_s2()) + full_view_key.get_D() == params->get_F()*r_data.s + full_view_key.get_D()); + + assert(r_data.T * r_data.s + full_view_key.get_D() == params->get_U()); + + + +} \ No newline at end of file diff --git a/src/fuzz/libspark/f4grumble_fuzz.cpp b/src/fuzz/libspark/f4grumble_fuzz.cpp new file mode 100644 index 0000000000..9d0f9b8d43 --- /dev/null +++ b/src/fuzz/libspark/f4grumble_fuzz.cpp @@ -0,0 +1,62 @@ +#include "../../libspark/f4grumble.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + std::string test_string = std::string((char *) buf); + std::vector test_char_vec; + test_char_vec.reserve(len); + + for (int i=0; i < len; i++) { + test_char_vec.push_back(test_string[i]); + } + + // too_long_size + bool exception_thrown_size = false; + bool exception_thrown_encode = false; + bool exception_thrown_decode = false; + + if(len > spark::F4Grumble::get_max_size()){ + + try { + spark::F4Grumble grumble(test_string[0], len); + } catch(const std::exception& ) { + exception_thrown_size = true; + } + assert(exception_thrown_size); + + spark::F4Grumble grumble = spark::F4Grumble(test_string[0], len); + + try { + grumble.encode(test_char_vec); + } catch (const std::exception& ) { + exception_thrown_encode = true; + } + + assert(exception_thrown_encode); + try { + grumble.decode(test_char_vec); + } catch (const std::exception& ) { + exception_thrown_decode = true; + } + assert(exception_thrown_decode); + 
return 0; + } + + spark::F4Grumble f4grumble_fuzz = spark::F4Grumble(test_string[0], len); + std::vector scrambled = f4grumble_fuzz.encode(test_char_vec); + std::vector unscrambled = f4grumble_fuzz.decode(scrambled); + + assert(scrambled.size() == test_char_vec.size()); + assert(unscrambled == test_char_vec); + + // bad_network + unsigned char evil_network = ~test_string[0]; + assert(test_string[0] != evil_network); + + spark::F4Grumble evil_grumble(evil_network, len); + //decoding with a different network + std::vector evil_unscrambled = evil_grumble.decode(scrambled); + assert(evil_unscrambled.size() == scrambled.size()); + assert(evil_unscrambled != test_char_vec); + return 0; +} \ No newline at end of file diff --git a/src/fuzz/libspark/grootle_fuzz.cpp b/src/fuzz/libspark/grootle_fuzz.cpp new file mode 100644 index 0000000000..de51fb0043 --- /dev/null +++ b/src/fuzz/libspark/grootle_fuzz.cpp @@ -0,0 +1,89 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/grootle.h" +#include "../../libspark/grootle_proof.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + size_t n = fdp.ConsumeIntegral(); + size_t m = fdp.ConsumeIntegral(); + size_t N = (size_t) std::pow(n, m); + + GroupElement H; + std::vector Gi = fsp.GetGroupElements(n*m); + std::vector Hi = fsp.GetGroupElements(n*m); + + size_t commit_size = fdp.ConsumeIntegral(); + std::vector S = fsp.GetGroupElements(commit_size); + std::vector V = fsp.GetGroupElements(commit_size); + + std::vector indexes = fdp.ConsumeBytes(len); + std::vector sizes; + sizes.resize(len); + for(size_t i=0; i < len; i++) { + sizes[i] = fdp.ConsumeIntegral(); + } + std::vector S1, V1; + std::vector> roots; + std::vector s, v; + for (std::size_t index : indexes) { + Scalar s_, v_; + s_ = fsp.GetScalar(); + v_ = fsp.GetScalar(); + s.emplace_back(s_); + v.emplace_back(v_); + + 
S1.emplace_back(S[index]); + V1.emplace_back(V[index]); + + S[index] += H*s_; + V[index] += H*v_; + + Scalar temp; + temp = fsp.GetScalar(); + std::vector root; + root.reserve(spark::SCALAR_ENCODING); + temp.serialize(root.data()); + roots.emplace_back(root); + } + + spark::Grootle grootle(H, Hi, Hi, n, m); + std::vector proofs; + + for (size_t i=0; i < indexes.size(); i++) { + proofs.emplace_back(); + std::vector S_(S.begin() + commit_size - sizes[i], S.end()); + std::vector V_(V.begin() + commit_size - sizes[i], V.end()); + + grootle.prove( + indexes[i] - (commit_size - sizes[i]), + s[i], + S_, + S1[i], + v[i], + V_, + V1[i], + roots[i], + proofs.back() + + ); + + assert(grootle.verify(S, S1[i], V, V1[i], roots[i], sizes[i], proofs.back())); + } + + assert(grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + // Add an invalid proof + proofs.emplace_back(proofs.back()); + S1.emplace_back(S1.back()); + V1.emplace_back(V1.back()); + S1.back().randomize(); + sizes.emplace_back(sizes.back()); + assert(!grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/grootle_fuzz_member.cpp b/src/fuzz/libspark/grootle_fuzz_member.cpp new file mode 100644 index 0000000000..2f0b57fb0e --- /dev/null +++ b/src/fuzz/libspark/grootle_fuzz_member.cpp @@ -0,0 +1,90 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/grootle.h" +#include "../../libspark/grootle_proof.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + std::size_t n = fdp.ConsumeIntegralInRange(2, 65535); + std::size_t m = fdp.ConsumeIntegralInRange(2, 65535); + std::size_t N = (size_t) std::pow(n,m); + + GroupElement H; + H.randomize(); + std::vector Gi = fsp.GetRandomGroupVector(n*m); + std::vector Hi = fsp.GetRandomGroupVector(n*m); + + size_t commit_size = fdp.ConsumeIntegralInRange(1, N); 
+ std::vector S = fsp.GetRandomGroupVector(commit_size); + std::vector V = fsp.GetRandomGroupVector(commit_size); + + std::vector indexes = fdp.ConsumeBytes(N); + std::vector sizes; + sizes.resize(N); + for(size_t i=0; i < N; i++) { + sizes[i] = fdp.ConsumeIntegralInRange(0, N); + } + std::vector S1, V1; + std::vector> roots; + std::vector s, v; + for (std::size_t index : indexes) { + Scalar s_, v_; + s_ = fsp.GetScalar(); + v_ = fsp.GetScalar(); + s.emplace_back(s_); + v.emplace_back(v_); + + S1.emplace_back(S[index]); + V1.emplace_back(V[index]); + + S[index] += H*s_; + V[index] += H*v_; + + Scalar temp; + temp = fsp.GetScalar(); + std::vector root; + root.reserve(spark::SCALAR_ENCODING); + temp.serialize(root.data()); + roots.emplace_back(root); + } + + spark::Grootle grootle(H, Gi, Hi, n, m); + std::vector proofs; + + for (size_t i=0; i < indexes.size(); i++) { + proofs.emplace_back(); + std::vector S_(S.begin() + commit_size - sizes[i], S.end()); + std::vector V_(V.begin() + commit_size - sizes[i], V.end()); + + grootle.prove( + indexes[i] - (commit_size - sizes[i]), + s[i], + S_, + S1[i], + v[i], + V_, + V1[i], + roots[i], + proofs.back() + + ); + + assert(grootle.verify(S, S1[i], V, V1[i], roots[i], sizes[i], proofs.back())); + } + + assert(grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + // Add an invalid proof + proofs.emplace_back(proofs.back()); + S1.emplace_back(S1.back()); + V1.emplace_back(V1.back()); + S1.back().randomize(); + sizes.emplace_back(sizes.back()); + assert(!grootle.verify(S, S1, V, V1, roots, sizes, proofs)); + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/mint_transaction_fuzz.cpp b/src/fuzz/libspark/mint_transaction_fuzz.cpp new file mode 100644 index 0000000000..8e10795667 --- /dev/null +++ b/src/fuzz/libspark/mint_transaction_fuzz.cpp @@ -0,0 +1,34 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/mint_transaction.h" +#include + +extern "C" int 
LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + const spark::Params* params; + params = spark::Params::get_default(); + const size_t t = fdp.ConsumeIntegral(); + + spark::SpendKey spend_key(params); + spark::FullViewKey full_view_key(spend_key); + spark::IncomingViewKey incoming_view_key(full_view_key); + + std::vector outputs; + + for (size_t i = 0; i < t; i++) { + spark::MintedCoinData output; + output.address = spark::Address(incoming_view_key, fdp.ConsumeIntegral()); + output.v = fdp.ConsumeIntegral(); + output.memo = fdp.ConsumeBytesAsString(len); + outputs.emplace_back(output); + } + + spark::MintTransaction mint(params, outputs, fdp.ConsumeBytes(spark::SCALAR_ENCODING)); + assert(mint.verify()); + + + return 0; + +} \ No newline at end of file diff --git a/src/fuzz/libspark/schnorr_fuzz.cpp b/src/fuzz/libspark/schnorr_fuzz.cpp new file mode 100644 index 0000000000..735c68d67f --- /dev/null +++ b/src/fuzz/libspark/schnorr_fuzz.cpp @@ -0,0 +1,95 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/schnorr_proof.h" +#include "../../libspark/schnorr.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + /** Serialization and Completeness tests **/ + GroupElement G0; + // G0 = fsp.GetGroupElement(); + + // NOTE: all GetGroupElement() is replaced by GetMemberGroupElement() + + // ensure that G0 is valid group element + // thus the crash of valid fieldElement and groupElement will not occur + G0.generate(buf); + + Scalar y0; + y0 = fsp.GetScalar(); + GroupElement Y0 = G0*y0; + + spark::SchnorrProof proof0; + + spark::Schnorr schnorr0(G0); + schnorr0.prove(y0, Y0, proof0); + + CDataStream serialized(SER_NETWORK, PROTOCOL_VERSION); + serialized << proof0; + + spark::SchnorrProof deserialized_proof0; + serialized >> deserialized_proof0; + + 
assert(proof0.A == deserialized_proof0.A); + assert(proof0.t == deserialized_proof0.t); + assert(schnorr0.verify(Y0, proof0)); + + /** End of serialization and completeness tests **/ + + /** Aggregation test **/ + + size_t n = fdp.ConsumeIntegral(); + + GroupElement G1; + G1 = fsp.GetMemberGroupElement(); + std::vector y1; + std::vector Y1; + + for(size_t i=0; i < n; i++) { + y1.emplace_back(); + y1.back() = fsp.GetScalar(); + + Y1.emplace_back(G1 * y1.back()); + } + + spark::SchnorrProof proof1; + spark::Schnorr schnorr1(G1); + schnorr1.prove(y1, Y1, proof1); + assert(schnorr1.verify(Y1, proof1)); + + /** End of aggregation test **/ + + /* + fuzzing bad proofs + */ + + // Bad Y + GroupElement evil_Y; + evil_Y.randomize(); + assert(!(schnorr1.verify(evil_Y, proof1))); + + // Bad A + spark::SchnorrProof evil_proof = proof1; + evil_proof.A.randomize(); + assert(!(schnorr1.verify(Y1, evil_proof))); + + // Bad t + evil_proof = proof1; + evil_proof.t.randomize(); + assert(!(schnorr1.verify(Y1, evil_proof))); + + // //checking empty proof + // std::vector y3; + // std::vector Y3; + // y3.resize(0); + // Y3.resize(0); + // spark::SchnorrProof proof3; + + // spark::Schnorr schnorr3(G1); + // schnorr3.prove(y3, Y3, proof3); + // assert(schnorr1.verify(Y3, proof3)); + +} \ No newline at end of file diff --git a/src/fuzz/libspark/spend_transaction_fuzz.cpp b/src/fuzz/libspark/spend_transaction_fuzz.cpp new file mode 100644 index 0000000000..14461e70b7 --- /dev/null +++ b/src/fuzz/libspark/spend_transaction_fuzz.cpp @@ -0,0 +1,110 @@ +#include "../fuzzing_utilities.h" +#include "../FuzzedDataProvider.h" +#include "../../libspark/spend_transaction.h" +#include + +extern "C" int LLVMFuzzerTestOneInput(uint8_t *buf, size_t len) { + FuzzedDataProvider fdp(buf, len); + FuzzedSecp256k1Object fsp(&fdp); + + const spark::Params* params; + params = spark::Params::get_default(); + const std::string memo = fdp.ConsumeBytesAsString(len); + + spark::SpendKey spend_key(params); + 
spark::FullViewKey full_view_key(spend_key); + spark::IncomingViewKey incoming_view_key(full_view_key); + + spark::Address address(incoming_view_key, fdp.ConsumeIntegral()); + + size_t N = (size_t) pow(params->get_n_grootle(), params->get_m_grootle()); + + bool exception_thrown = false; + if (memo.size() > params->get_memo_bytes()) { + try{ + Scalar k; + k.randomize(); + uint64_t v = rand(); + spark::Coin(params, spark::COIN_TYPE_MINT, k, address, v, memo, fdp.ConsumeBytes(spark::SCALAR_ENCODING)); + } catch(const std::exception& ) { + exception_thrown = true; + } + assert(exception_thrown); + return 0; + } + + std::vector in_coins; + for (size_t i = 0; i < N; i ++) { + secp_primitives::Scalar k = fsp.GetScalar(); + + uint64_t v = fdp.ConsumeIntegral(); + + in_coins.emplace_back(spark::Coin(params, spark::COIN_TYPE_MINT, k, address, v, memo, fdp.ConsumeBytes(spark::SCALAR_ENCODING))); + } + + uint64_t f = 0; + + std::vector spend_indices = fdp.ConsumeBytes(len); + if (spend_indices.size() < len) { + for (int i = spend_indices.size(); i < len; i++) { + spend_indices.push_back(std::rand()); + } + } + std::vector spend_coin_data; + std::unordered_map cover_set_data; + const size_t w = spend_indices.size(); + for (size_t u = 0; u < w; u++) { + spark::IdentifiedCoinData identified_coin_data = in_coins[spend_indices[u]].identify(incoming_view_key); + spark::RecoveredCoinData recovered_coin_data = in_coins[spend_indices[u]].recover(full_view_key, identified_coin_data); + + spend_coin_data.emplace_back(); + uint64_t cover_set_id = fdp.ConsumeIntegral(); + spend_coin_data.back().cover_set_id = cover_set_id; + + spark::CoverSetData set_data; + set_data.cover_set = in_coins; + set_data.cover_set_representation = fdp.ConsumeBytes(spark::SCALAR_ENCODING); + cover_set_data[cover_set_id] = set_data; + spend_coin_data.back().index = spend_indices[u]; + spend_coin_data.back().k = identified_coin_data.k; + spend_coin_data.back().s = recovered_coin_data.s; + spend_coin_data.back().T 
= recovered_coin_data.T; + spend_coin_data.back().v = identified_coin_data.v; + + f += identified_coin_data.v; + } + + const size_t t = fdp.ConsumeIntegral(); + std::vector out_coin_data; + for (size_t j = 0; j < t; j++) { + out_coin_data.emplace_back(); + out_coin_data.back().address = address; + out_coin_data.back().v = fdp.ConsumeIntegral(); + out_coin_data.back().memo = memo; + + f -= out_coin_data.back().v; + } + + uint64_t fee_test = f; + for (size_t j = 0; j < t; j++) { + fee_test += out_coin_data[j].v; + } + + for (size_t j = 0; j < t; j++) { + fee_test -= spend_coin_data[j].v; + } + assert(fee_test == 0); + + spark::SpendTransaction transaction(params, full_view_key, spend_key, spend_coin_data, cover_set_data, f, 0, out_coin_data); + + transaction.setCoverSets(cover_set_data); + std::unordered_map> cover_sets; + for (const auto set_data: cover_set_data) { + cover_sets[set_data.first] = set_data.second.cover_set; + } + assert(spark::SpendTransaction::verify(transaction, cover_sets)); + + + return 0; + +} \ No newline at end of file diff --git a/src/libspark/schnorr.cpp b/src/libspark/schnorr.cpp index 353bfbc88c..4657fece77 100644 --- a/src/libspark/schnorr.cpp +++ b/src/libspark/schnorr.cpp @@ -47,6 +47,10 @@ void Schnorr::prove(const std::vector& y, const std::vector& Y, const SchnorrProof& pro const Scalar c = challenge(Y, proof.A); Scalar c_power(c); for (std::size_t i = 0; i < n; i++) { + if (c_power.isZero()) { + throw std::invalid_argument("Unexpected challenge!"); + } + points.emplace_back(Y[i]); scalars.emplace_back(c_power); c_power *= c; diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 6cdf290499..bbd3e4b049 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -233,8 +233,8 @@ void TransactionView::setModel(WalletModel *_model) transactionView->setAlternatingRowColors(true); transactionView->setSelectionBehavior(QAbstractItemView::SelectRows); 
transactionView->setSelectionMode(QAbstractItemView::ExtendedSelection); + transactionView->horizontalHeader()->setSortIndicator(TransactionTableModel::Date, Qt::DescendingOrder); transactionView->setSortingEnabled(true); - transactionView->sortByColumn(TransactionTableModel::Date, Qt::DescendingOrder); transactionView->verticalHeader()->hide(); transactionView->setColumnWidth(TransactionTableModel::Status, STATUS_COLUMN_WIDTH); diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index bf010eb784..4572a9553d 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -193,6 +193,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "getmintmetadata", 0 }, { "getusedcoinserials", 0 }, { "getlatestcoinids", 0 }, + { "getsparkmintmetadata", 0 }, //Lelantus { "mintspark", 0 }, diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index d7e96a154b..6954bc0a24 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -1171,8 +1171,8 @@ UniValue getsparkanonymityset(const JSONRPCRequest& request) " \"setHash\" (string) Anonymity set hash\n" " \"mints\" (Pair) Serialized Spark coin paired with txhash\n" "}\n" - + HelpExampleCli("getsparkanonymityset", "\"1\"" "{\"ca511f07489e35c9bc60ca62c82de225ba7aae7811ce4c090f95aa976639dc4e\"}") - + HelpExampleRpc("getsparkanonymityset", "\"1\"" "{\"ca511f07489e35c9bc60ca62c82de225ba7aae7811ce4c090f95aa976639dc4e\"}") + + HelpExampleCli("getsparkanonymityset", "\"1\" " "{\"ca511f07489e35c9bc60ca62c82de225ba7aae7811ce4c090f95aa976639dc4e\"}") + + HelpExampleRpc("getsparkanonymityset", "\"1\" " "{\"ca511f07489e35c9bc60ca62c82de225ba7aae7811ce4c090f95aa976639dc4e\"}") ); @@ -1248,6 +1248,9 @@ UniValue getsparkmintmetadata(const JSONRPCRequest& request) "{\n" " \"metadata\" (Pair) nHeight and id for each coin\n" "}\n" + + HelpExampleCli("getsparkmintmetadata", "'{\"coinHashes\": [\"b476ed2b374bb081ea51d111f68f0136252521214e213d119b8dc67b92f5a390\",\"b476ed2b374bb081ea51d111f68f0136252521214e213d119b8dc67b92f5a390\"]}'") + + 
HelpExampleRpc("getsparkmintmetadata", "{\"coinHashes\": [\"b476ed2b374bb081ea51d111f68f0136252521214e213d119b8dc67b92f5a390\",\"b476ed2b374bb081ea51d111f68f0136252521214e213d119b8dc67b92f5a390\"]}") + ); UniValue coinHashes = find_value(request.params[0].get_obj(), "coinHashes"); diff --git a/src/spark/state.cpp b/src/spark/state.cpp index d6163e608a..33b6169776 100644 --- a/src/spark/state.cpp +++ b/src/spark/state.cpp @@ -609,10 +609,10 @@ bool CheckSparkSpendTransaction( id = idAndHash.first - 1; } if (id) { - if (index->sparkMintedCoins.count(idAndHash.first) > 0) { + if (index->sparkMintedCoins.count(id) > 0) { BOOST_FOREACH( const auto& coin, - index->sparkMintedCoins[idAndHash.first]) { + index->sparkMintedCoins[id]) { cover_set.push_back(coin); } } @@ -1247,7 +1247,7 @@ void CSparkState::GetCoinsForRecovery( int coinGroupID, std::string start_block_hash, uint256& blockHash_out, - std::vector> coins, + std::vector>& coins, std::vector& setHash_out) { coins.clear(); if (coinGroups.count(coinGroupID) == 0) { diff --git a/src/spark/state.h b/src/spark/state.h index 24262cb4fc..f626ecd283 100644 --- a/src/spark/state.h +++ b/src/spark/state.h @@ -208,7 +208,7 @@ class CSparkState { int coinGroupID, std::string start_block_hash, uint256& blockHash_out, - std::vector> coins, + std::vector>& coins, std::vector& setHash_out); std::unordered_map const & GetMints() const;