diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 5fa4d668..2c26cdad 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -79,5 +79,6 @@ jobs: - name: Run holesky tests env: SIGNER_PRIVATE_KEY: ${{ secrets.SIGNER_PRIVATE_KEY }} + ETHEREUM_RPC: ${{ secrets.ETHEREUM_RPC }} run: | - SIGNER_PRIVATE_KEY=$SIGNER_PRIVATE_KEY make holesky-test \ No newline at end of file + SIGNER_PRIVATE_KEY=$SIGNER_PRIVATE_KEY ETHEREUM_RPC=$ETHEREUM_RPC make holesky-test \ No newline at end of file diff --git a/README.md b/README.md index 4a4b73aa..260c4f09 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,18 @@ Additional CLI args are provided for targeting an EigenDA network backend: * `--eigenda-cache-path`: Directory path to dump cached SRS tables * `--eigenda-max-blob-length`: The maximum blob length that this EigenDA sidecar proxy should expect to be written or read from EigenDA. This configuration setting is used to determine how many SRS points should be loaded into memory for generating/verifying KZG commitments returned by the EigenDA disperser. Valid byte units are either base-2 or base-10 byte amounts (not bits), e.g. `30 MiB`, `4Kb`, `30MB`. The maximum blob size is a little more than `1GB`. + +### Certificate verification +For additional security, there is a cert verification feature which verifies the blob metadata read from the disperser to ensure that: +1. The respective batch hash can be computed locally and matches the one persisted on-chain in the `ServiceManager` contract +2. The blob inclusion proof can be merkleized to generate the proper batch root +3. All quorum params are adequately defined and expressed when compared to their on-chain counterparts + +To target this feature, the following CLI args should be provided: +* `--eigenda-svc-manager-addr`: The deployed EigenDA service manager address. 
The list can be found [here](https://github.com/Layr-Labs/eigenlayer-middleware/?tab=readme-ov-file#current-mainnet-deployment). +* `--eigenda-eth-rpc` : JSON RPC node endpoint for the Ethereum network used for finalizing DA blobs. See available list [here](https://docs.eigenlayer.xyz/eigenda/networks/). + + ### In-Memory Storage An ephemeral memory store backend can be used for faster feedback testing when performing rollup integrations. The following cli args can be used to target the feature: @@ -35,6 +47,9 @@ An ephemeral memory store backend can be used for faster feedback testing when p * `--memstore.enabled`: Boolean feature flag * `--memstore.expiration`: Duration for which a blob will exist +## Metrics +To see the list of available metrics, run `./bin/eigenda-proxy doc metrics` + ## Running Locally 1. Compile binary: `make eigenda-proxy` diff --git a/cmd/server/main.go b/cmd/server/main.go index 7bfb7a34..c24b00b6 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -8,23 +8,22 @@ import ( "github.com/joho/godotenv" "github.com/urfave/cli/v2" + "github.com/Layr-Labs/eigenda-proxy/metrics" "github.com/Layr-Labs/eigenda-proxy/server" - "github.com/ethereum-optimism/optimism/op-node/metrics" - opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" oplog "github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum-optimism/optimism/op-service/metrics/doc" "github.com/ethereum-optimism/optimism/op-service/opio" ) -var Version = "v0.0.1" +var Version = "v1.1.0" func main() { oplog.SetupDefaults() app := cli.NewApp() app.Flags = cliapp.ProtectFlags(server.Flags) - app.Version = opservice.FormatVersion(Version, "", "", "") + app.Version = Version app.Name = "eigenda-proxy" app.Usage = "EigenDA Proxy Sidecar Service" app.Description = "Service for more trustless and secure interactions with EigenDA" diff --git a/common/common.go b/common/common.go index df8cb067..a1e185d1 100644 
--- a/common/common.go +++ b/common/common.go @@ -2,6 +2,7 @@ package common import ( "fmt" + "math/big" "strconv" "strings" @@ -12,7 +13,63 @@ var ( ErrInvalidDomainType = fmt.Errorf("invalid domain type") ) -type Certificate = disperser.BlobInfo +// G1Point struct to represent G1Point in Solidity +type G1Point struct { + X *big.Int + Y *big.Int +} + +// QuorumBlobParam struct to represent QuorumBlobParam in Solidity +type QuorumBlobParam struct { + QuorumNumber uint8 + AdversaryThresholdPercentage uint8 + ConfirmationThresholdPercentage uint8 + ChunkLength uint32 +} + +// BlobHeader struct to represent BlobHeader in Solidity +type BlobHeader struct { + Commitment G1Point + DataLength uint32 + QuorumBlobParams []QuorumBlobParam +} + +type Certificate disperser.BlobInfo + +func (c *Certificate) BlobIndex() uint32 { + return c.BlobVerificationProof.BlobIndex +} + +func (c *Certificate) BatchHeaderRoot() []byte { + return c.BlobVerificationProof.BatchMetadata.BatchHeader.BatchRoot +} + +func (c *Certificate) ReadBlobHeader() BlobHeader { + // parse quorum params + + qps := make([]QuorumBlobParam, len(c.BlobHeader.BlobQuorumParams)) + for i, qp := range c.BlobHeader.BlobQuorumParams { + qps[i] = QuorumBlobParam{ + QuorumNumber: uint8(qp.QuorumNumber), + AdversaryThresholdPercentage: uint8(qp.AdversaryThresholdPercentage), + ConfirmationThresholdPercentage: uint8(qp.ConfirmationThresholdPercentage), + ChunkLength: qp.ChunkLength, + } + } + + return BlobHeader{ + Commitment: G1Point{ + X: new(big.Int).SetBytes(c.BlobHeader.Commitment.X), + Y: new(big.Int).SetBytes(c.BlobHeader.Commitment.Y), + }, + DataLength: c.BlobHeader.DataLength, + QuorumBlobParams: qps, + } +} + +func (c *Certificate) Proof() *disperser.BlobVerificationProof { + return c.BlobVerificationProof +} // DomainType is a enumeration type for the different data domains for which a // blob can exist between diff --git a/e2e/optimism_test.go b/e2e/optimism_test.go index 6ac74619..155d1a07 100644 --- 
a/e2e/optimism_test.go +++ b/e2e/optimism_test.go @@ -149,12 +149,6 @@ func TestOptimism(gt *testing.T) { // verify op_stack.sequencer.ActL2PipelineFull(t) - - // expire the challenge window so these blocks can no longer be challenged - op_stack.ActL1Blocks(t, op_stack.plasmaCfg.ChallengeWindow) - - // advance derivation and finalize plasma via the L1 signal - op_stack.sequencer.ActL2PipelineFull(t) op_stack.ActL1Finalized(t) // assert that EigenDA proxy's was written and read from diff --git a/e2e/server_test.go b/e2e/server_test.go index 2bf43445..6d6e9e3c 100644 --- a/e2e/server_test.go +++ b/e2e/server_test.go @@ -44,7 +44,7 @@ func TestPlasmaClient(t *testing.T) { // 1 - write arbitrary data to EigenDA - var testPreimage = []byte("inter-subjective and not objective!") + var testPreimage = []byte("feel the rain on your skin!") t.Log("Setting input data on proxy server...") commit, err := daClient.SetInput(ts.Ctx, testPreimage) diff --git a/e2e/setup.go b/e2e/setup.go index 7e234e67..e381d65d 100644 --- a/e2e/setup.go +++ b/e2e/setup.go @@ -22,6 +22,7 @@ import ( const ( privateKey = "SIGNER_PRIVATE_KEY" + ethRPC = "ETHEREUM_RPC" transport = "http" svcName = "eigenda_proxy" host = "127.0.0.1" @@ -43,6 +44,12 @@ func CreateTestSuite(t *testing.T, useMemory bool) (TestSuite, func()) { t.Fatal("SIGNER_PRIVATE_KEY environment variable not set") } + // load node url from environment + ethRPC := os.Getenv(ethRPC) + if ethRPC == "" && !useMemory { + t.Fatal("ETHEREUM_RPC environment variable is not set") + } + log := oplog.NewLogger(os.Stdout, oplog.CLIConfig{ Level: log.LevelDebug, Format: oplog.FormatLogFmt, @@ -57,6 +64,8 @@ func CreateTestSuite(t *testing.T, useMemory bool) (TestSuite, func()) { DisableTLS: false, SignerPrivateKeyHex: pk, }, + EthRPC: ethRPC, + SvcManagerAddr: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b", // incompatible with non-Holesky networks CacheDir: "../operator-setup/resources/SRSTables", G1Path: 
"../operator-setup/resources/g1_abbr.point", G2Path: "../test/resources/kzg/g2.point", // do we need this? diff --git a/eigenda/config.go b/eigenda/config.go index 4a3e8de5..c7c2ff0e 100644 --- a/eigenda/config.go +++ b/eigenda/config.go @@ -7,6 +7,7 @@ import ( "time" "github.com/Layr-Labs/eigenda-proxy/common" + "github.com/Layr-Labs/eigenda-proxy/verify" "github.com/Layr-Labs/eigenda/api/clients" "github.com/Layr-Labs/eigenda/api/clients/codecs" "github.com/Layr-Labs/eigenda/encoding/kzg" @@ -16,6 +17,8 @@ import ( const ( RPCFlagName = "eigenda-rpc" + EthRPCFlagName = "eigenda-eth-rpc" + SvcManagerAddrFlagName = "eigenda-svc-manager-addr" StatusQueryRetryIntervalFlagName = "eigenda-status-query-retry-interval" StatusQueryTimeoutFlagName = "eigenda-status-query-timeout" DisableTlsFlagName = "eigenda-disable-tls" @@ -44,6 +47,10 @@ type Config struct { // The blob encoding version to use when writing blobs from the high level interface. PutBlobEncodingVersion codecs.BlobEncodingVersion + // ETH vars + EthRPC string + SvcManagerAddr string + // KZG vars CacheDir string @@ -73,14 +80,15 @@ func (c *Config) GetMaxBlobLength() (uint64, error) { return c.maxBlobLengthBytes, nil } -func (c *Config) KzgConfig() *kzg.KzgConfig { +func (c *Config) VerificationCfg() *verify.Config { numBytes, err := c.GetMaxBlobLength() if err != nil { panic(fmt.Errorf("Check() was not called on config object, err is not nil: %w", err)) } numPointsNeeded := uint64(math.Ceil(float64(numBytes) / BytesPerSymbol)) - return &kzg.KzgConfig{ + + kzgCfg := &kzg.KzgConfig{ G1Path: c.G1Path, G2PowerOf2Path: c.G2PowerOfTauPath, CacheDir: c.CacheDir, @@ -88,6 +96,21 @@ func (c *Config) KzgConfig() *kzg.KzgConfig { SRSNumberToLoad: numPointsNeeded, NumWorker: uint64(runtime.GOMAXPROCS(0)), } + + if c.EthRPC == "" || c.SvcManagerAddr == "" { + return &verify.Config{ + Verify: false, + KzgConfig: kzgCfg, + } + } + + return &verify.Config{ + Verify: true, + RPCURL: c.EthRPC, + SvcManagerAddr: 
c.SvcManagerAddr, + KzgConfig: kzgCfg, + } + } // NewConfig parses the Config from the provided flags or environment variables. @@ -109,6 +132,8 @@ func ReadConfig(ctx *cli.Context) Config { G2PowerOfTauPath: ctx.String(G2TauFlagName), CacheDir: ctx.String(CachePathFlagName), MaxBlobLength: ctx.String(MaxBlobLengthFlagName), + SvcManagerAddr: ctx.String(SvcManagerAddrFlagName), + EthRPC: ctx.String(EthRPCFlagName), } return cfg } @@ -199,5 +224,15 @@ func CLIFlags(envPrefix string) []cli.Flag { Usage: "Directory path to SRS tables", EnvVars: prefixEnvVars("TARGET_CACHE_PATH"), }, + &cli.StringFlag{ + Name: EthRPCFlagName, + Usage: "JSON RPC node endpoint for the Ethereum network used for finalizing DA blobs.", + EnvVars: prefixEnvVars("ETH_RPC"), + }, + &cli.StringFlag{ + Name: SvcManagerAddrFlagName, + Usage: "Deployed EigenDA service manager address.", + EnvVars: prefixEnvVars("SERVICE_MANAGER_ADDR"), + }, } } diff --git a/server/load_store.go b/server/load_store.go index 1ac7bc8c..0947822e 100644 --- a/server/load_store.go +++ b/server/load_store.go @@ -10,14 +10,20 @@ import ( ) func LoadStore(cfg CLIConfig, ctx context.Context, log log.Logger) (store.Store, error) { - log.Info("Using eigenda backend") daCfg := cfg.EigenDAConfig + vCfg := daCfg.VerificationCfg() - verifier, err := verify.NewVerifier(daCfg.KzgConfig()) + verifier, err := verify.NewVerifier(vCfg, log) if err != nil { return nil, err } + if vCfg.Verify { + log.Info("Certificate verification with Ethereum enabled") + } else { + log.Warn("Verification disabled") + } + maxBlobLength, err := daCfg.GetMaxBlobLength() if err != nil { return nil, err @@ -28,6 +34,7 @@ func LoadStore(cfg CLIConfig, ctx context.Context, log log.Logger) (store.Store, return store.NewMemStore(ctx, &cfg.MemStoreCfg, verifier, log, maxBlobLength) } + log.Info("Using eigenda backend") client, err := clients.NewEigenDAClient(log, daCfg.ClientConfig) if err != nil { return nil, err diff --git a/store/eigenda.go b/store/eigenda.go 
index 8b4071e7..dd96ce07 100644 --- a/store/eigenda.go +++ b/store/eigenda.go @@ -43,10 +43,15 @@ func (e EigenDAStore) Get(ctx context.Context, key []byte, domain common.DomainT // reencode blob for verification encodedBlob, err := e.client.GetCodec().EncodeBlob(decodedBlob) if err != nil { - return nil, fmt.Errorf("EigenDA client failed to reencode blob: %w", err) + return nil, fmt.Errorf("EigenDA client failed to re-encode blob: %w", err) } - err = e.verifier.Verify(cert.BlobHeader.Commitment, encodedBlob) + err = e.verifier.VerifyCommitment(cert.BlobHeader.Commitment, encodedBlob) + if err != nil { + return nil, err + } + + err = e.verifier.VerifyCert(&cert) if err != nil { return nil, err } @@ -73,9 +78,9 @@ func (e EigenDAStore) Put(ctx context.Context, value []byte) (comm []byte, err e encodedBlob, err := e.client.GetCodec().EncodeBlob(value) if err != nil { - return nil, fmt.Errorf("EigenDA client failed to reencode blob: %w", err) + return nil, fmt.Errorf("EigenDA client failed to re-encode blob: %w", err) } - err = e.verifier.Verify(cert.BlobHeader.Commitment, encodedBlob) + err = e.verifier.VerifyCommitment(cert.BlobHeader.Commitment, encodedBlob) if err != nil { return nil, err } diff --git a/store/memory.go b/store/memory.go index 9d80edae..9535c83b 100644 --- a/store/memory.go +++ b/store/memory.go @@ -120,7 +120,7 @@ func (e *MemStore) Get(ctx context.Context, commit []byte, domain eigendacommon. 
} // Don't need to do this really since it's a mock store - err = e.verifier.Verify(cert.BlobHeader.Commitment, encodedBlob) + err = e.verifier.VerifyCommitment(cert.BlobHeader.Commitment, encodedBlob) if err != nil { return nil, err } diff --git a/store/memory_test.go b/store/memory_test.go index f3ce1783..3af5fb7f 100644 --- a/store/memory_test.go +++ b/store/memory_test.go @@ -29,7 +29,13 @@ func TestGetSet(t *testing.T) { SRSNumberToLoad: 3000, NumWorker: uint64(runtime.GOMAXPROCS(0)), } - verifier, err := verify.NewVerifier(kzgConfig) + + cfg := &verify.Config{ + Verify: false, + KzgConfig: kzgConfig, + } + + verifier, err := verify.NewVerifier(cfg, nil) assert.NoError(t, err) ms, err := NewMemStore( @@ -68,7 +74,13 @@ func TestExpiration(t *testing.T) { SRSNumberToLoad: 3000, NumWorker: uint64(runtime.GOMAXPROCS(0)), } - verifier, err := verify.NewVerifier(kzgConfig) + + cfg := &verify.Config{ + Verify: false, + KzgConfig: kzgConfig, + } + + verifier, err := verify.NewVerifier(cfg, nil) assert.NoError(t, err) ms, err := NewMemStore( diff --git a/verify/cert.go b/verify/cert.go new file mode 100644 index 00000000..35314c36 --- /dev/null +++ b/verify/cert.go @@ -0,0 +1,86 @@ +package verify + +import ( + "fmt" + + proxy_common "github.com/Layr-Labs/eigenda-proxy/common" + binding "github.com/Layr-Labs/eigenda/contracts/bindings/EigenDAServiceManager" + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" +) + +// CertVerifier verifies the DA certificate against on-chain EigenDA contracts +// to ensure disperser returned fields haven't been tampered with +type CertVerifier struct { + manager *binding.ContractEigenDAServiceManagerCaller +} + +func NewCertVerifier(cfg *Config, l log.Logger) (*CertVerifier, error) { + client, err := ethclient.Dial(cfg.RPCURL) + if err != nil { + return nil, fmt.Errorf("failed to dial ETH RPC node: %s", err.Error()) + } + + // construct binding + m, err := 
binding.NewContractEigenDAServiceManagerCaller(common.HexToAddress(cfg.SvcManagerAddr), client) + if err != nil { + return nil, err + } + + return &CertVerifier{ + manager: m, + }, nil +} + +func (cv *CertVerifier) VerifyBatch(header *binding.IEigenDAServiceManagerBatchHeader, + id uint32, recordHash [32]byte, blockNum uint32) error { + // 1 - Verify batch hash + + // 1.a - ensure that a batch hash can be looked up for a batch ID + expectedHash, err := cv.manager.BatchIdToBatchMetadataHash(nil, id) + if err != nil { + return err + } + + // 1.b - ensure that hash generated from local cert matches one stored on-chain + + actualHash, err := HashBatchMetadata(header, recordHash, blockNum) + + if err != nil { + return err + } + + equal := proxy_common.EqualSlices(expectedHash[:], actualHash[:]) + if !equal { + return fmt.Errorf("batch hash mismatch, expected: %x, got: %x", expectedHash, actualHash) + } + + return nil +} + +// VerifyMerkleProof +func (cv *CertVerifier) VerifyMerkleProof(inclusionProof []byte, root []byte, blobIndex uint32, blobHeader proxy_common.BlobHeader) error { + leafHash, err := HashEncodeBlobHeader(blobHeader) + if err != nil { + return err + } + + generatedRoot, err := ProcessInclusionProof(inclusionProof, leafHash, uint64(blobIndex)) + if err != nil { + return err + } + + equal := proxy_common.EqualSlices(root, generatedRoot.Bytes()) + if !equal { + return fmt.Errorf("root hash mismatch, expected: %x, got: %x", root, generatedRoot) + } + + return nil +} + +// 3 - (TODO) verify blob security params +func (cv *CertVerifier) VerifyBlobParams(inclusionProof []byte, rootHash []byte, leafHash []byte, index uint64) error { + return nil +} diff --git a/verify/hasher.go b/verify/hasher.go new file mode 100644 index 00000000..ca2353c6 --- /dev/null +++ b/verify/hasher.go @@ -0,0 +1,145 @@ +package verify + +import ( + "encoding/binary" + + common "github.com/Layr-Labs/eigenda-proxy/common" + binding 
"github.com/Layr-Labs/eigenda/contracts/bindings/EigenDAServiceManager" + "github.com/ethereum/go-ethereum/accounts/abi" + geth_common "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// HashBatchMetadata regenerates a batch data hash +// replicates: https://github.com/Layr-Labs/eigenda-utils/blob/c4cbc9ec078aeca3e4a04bd278e2fb136bf3e6de/src/libraries/EigenDAHasher.sol#L46-L54 +func HashBatchMetadata(bh *binding.IEigenDAServiceManagerBatchHeader, sigHash [32]byte, blockNum uint32) (geth_common.Hash, error) { + batchHeaderType, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{ + { + Name: "blobHeadersRoot", + Type: "bytes32", + }, + { + Name: "quorumNumbers", + Type: "bytes", + }, + { + Name: "signedStakeForQuorums", + Type: "bytes", + }, + { + Name: "referenceBlockNumber", + Type: "uint32", + }, + }) + + if err != nil { + return [32]byte{}, err + } + + arguments := abi.Arguments{ + { + Type: batchHeaderType, + }, + } + + s := struct { + BlobHeadersRoot [32]byte + QuorumNumbers []byte + SignedStakeForQuorums []byte + ReferenceBlockNumber uint32 + }{ + BlobHeadersRoot: bh.BlobHeadersRoot, + QuorumNumbers: bh.QuorumNumbers, + SignedStakeForQuorums: bh.SignedStakeForQuorums, + ReferenceBlockNumber: bh.ReferenceBlockNumber, + } + + bytes, err := arguments.Pack(s) + if err != nil { + return [32]byte{}, nil + } + + headerHash := crypto.Keccak256Hash(bytes) + return HashBatchHashedMetadata(headerHash, sigHash, blockNum) +} + +// HashBatchHashedMetadata hashes the given metadata into the commitment that will be stored in the contract +// replicates: https://github.com/Layr-Labs/eigenda-utils/blob/c4cbc9ec078aeca3e4a04bd278e2fb136bf3e6de/src/libraries/EigenDAHasher.sol#L19-L25 +func HashBatchHashedMetadata(batchHeaderHash [32]byte, signatoryRecordHash [32]byte, blockNumber uint32) (geth_common.Hash, error) { + + // since the solidity function uses abi.encodePacked, we need to consolidate the byte space that + // blockNum 
occupies to only 4 bytes versus 28 or 256 bits when encoded to abi buffer + a := make([]byte, 4) + binary.BigEndian.PutUint32(a, blockNumber) + + bytes32Type, err := abi.NewType("bytes32", "bytes32", nil) + if err != nil { + return geth_common.BytesToHash([]byte{}), err + } + + arguments := abi.Arguments{ + { + Type: bytes32Type, + }, + { + Type: bytes32Type, + }, + } + + bytes, err := arguments.Pack(batchHeaderHash, signatoryRecordHash) + if err != nil { + return [32]byte{}, err + } + + bytes = append(bytes, a...) + headerHash := crypto.Keccak256Hash(bytes) + + return headerHash, nil +} + +// HashBlobHeader function to hash BlobHeader +func HashBlobHeader(blobHeader common.BlobHeader) (geth_common.Hash, error) { + + blobHeaderType, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{ + {Name: "commitment", Type: "tuple", Components: []abi.ArgumentMarshaling{ + {Name: "X", Type: "uint256"}, + {Name: "Y", Type: "uint256"}, + }}, + {Name: "dataLength", Type: "uint32"}, + {Name: "quorumBlobParams", Type: "tuple[]", Components: []abi.ArgumentMarshaling{ + {Name: "quorumNumber", Type: "uint8"}, + {Name: "adversaryThresholdPercentage", Type: "uint8"}, + {Name: "confirmationThresholdPercentage", Type: "uint8"}, + {Name: "chunkLength", Type: "uint32"}, + }}, + }) + if err != nil { + return geth_common.Hash{}, err + } + + // Create ABI arguments + arguments := abi.Arguments{ + {Type: blobHeaderType}, + } + + // Pack the BlobHeader + bytes, err := arguments.Pack(blobHeader) + if err != nil { + return geth_common.Hash{}, err + } + // Hash the packed bytes using Keccak256 + hash := crypto.Keccak256Hash(bytes) + return hash, nil +} + +// Function to hash and encode header +func HashEncodeBlobHeader(header common.BlobHeader) (geth_common.Hash, error) { + // Hash the BlobHeader + blobHash, err := HashBlobHeader(header) + if err != nil { + return geth_common.Hash{}, err + } + + finalHash := crypto.Keccak256Hash(blobHash.Bytes()) + return finalHash, nil +} diff --git 
a/verify/hasher_test.go b/verify/hasher_test.go new file mode 100644 index 00000000..db4d09e6 --- /dev/null +++ b/verify/hasher_test.go @@ -0,0 +1,135 @@ +package verify + +import ( + "math/big" + "testing" + + "github.com/Layr-Labs/eigenda-proxy/common" + eigenda_common "github.com/Layr-Labs/eigenda/api/grpc/common" + "github.com/Layr-Labs/eigenda/api/grpc/disperser" + binding "github.com/Layr-Labs/eigenda/contracts/bindings/EigenDAServiceManager" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +func TestHashBatchHashedMetadata(t *testing.T) { + batchHeaderHash := crypto.Keccak256Hash([]byte("batchHeader")) + sigRecordHash := crypto.Keccak256Hash([]byte("signatoryRecord")) + + // 1 - Test using uint32 MAX + var blockNum uint32 = 4294967295 + + expected := "0x687b60d8b30b6aaddf6413728fb66fb7a7554601c2cc8e17a37fa94ad0818500" + actual, err := HashBatchHashedMetadata(batchHeaderHash, sigRecordHash, blockNum) + require.NoError(t, err) + + require.Equal(t, expected, actual.String()) + + // 2 - Test using uint32 value + blockNum = 4294967294 + + expected = "0x94d77be4d3d180d32d61ec8037e687b71e7996feded39b72a6dc3f9ff6406b30" + actual, err = HashBatchHashedMetadata(batchHeaderHash, sigRecordHash, blockNum) + require.NoError(t, err) + + require.Equal(t, expected, actual.String()) + + // 3 - Testing using uint32 0 value + blockNum = 0 + + expected = "0x482dfb1545a792b6d118a045033143d0cc28b0e5a4b2e1924decf27e4fc8c250" + actual, err = HashBatchHashedMetadata(batchHeaderHash, sigRecordHash, blockNum) + require.NoError(t, err) + + require.Equal(t, expected, actual.String()) +} + +func TestHashBatchMetadata(t *testing.T) { + testHash := crypto.Keccak256Hash([]byte("batchHeader")) + + header := &binding.IEigenDAServiceManagerBatchHeader{ + BlobHeadersRoot: testHash, + QuorumNumbers: testHash.Bytes(), + SignedStakeForQuorums: testHash.Bytes(), + ReferenceBlockNumber: 1, + } + + expected := 
"0x746f8a453586621d12e41d097eab089b1f25beca44c434281d68d4be0484b7e8" + + actual, err := HashBatchMetadata(header, testHash, 1) + require.NoError(t, err) + require.Equal(t, actual.String(), expected) + +} + +func TestHashBlobHeader(t *testing.T) { + expected := "0xba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393" + + // [[1,1],2,[[2,4,5,6]]] + header := &disperser.BlobHeader{ + Commitment: &eigenda_common.G1Commitment{ + X: big.NewInt(1).Bytes(), + Y: big.NewInt(1).Bytes(), + }, + DataLength: 2, + BlobQuorumParams: []*disperser.BlobQuorumParam{ + { + QuorumNumber: 2, + AdversaryThresholdPercentage: 4, + ConfirmationThresholdPercentage: 5, + ChunkLength: 6, + }, + { + QuorumNumber: 2, + AdversaryThresholdPercentage: 4, + ConfirmationThresholdPercentage: 5, + ChunkLength: 6, + }, + }, + } + + cert := &common.Certificate{ + BlobHeader: header, + } + + actual, err := HashBlobHeader(cert.ReadBlobHeader()) + + require.NoError(t, err) + require.Equal(t, expected, actual.String()) +} + +func TestHashEncodeBlobHeader(t *testing.T) { + expected := "0xf15f43fa44bae9b74cd2f88f8f838e09ff7ab5d50f2170f07b98479eb7da98ba" + + // [[1,1],2,[[2,4,5,6]]] + header := &disperser.BlobHeader{ + Commitment: &eigenda_common.G1Commitment{ + X: big.NewInt(1).Bytes(), + Y: big.NewInt(1).Bytes(), + }, + DataLength: 2, + BlobQuorumParams: []*disperser.BlobQuorumParam{ + { + QuorumNumber: 2, + AdversaryThresholdPercentage: 4, + ConfirmationThresholdPercentage: 5, + ChunkLength: 6, + }, + { + QuorumNumber: 2, + AdversaryThresholdPercentage: 4, + ConfirmationThresholdPercentage: 5, + ChunkLength: 6, + }, + }, + } + + cert := &common.Certificate{ + BlobHeader: header, + } + + actual, err := HashEncodeBlobHeader(cert.ReadBlobHeader()) + + require.NoError(t, err) + require.Equal(t, expected, actual.String()) +} diff --git a/verify/merkle.go b/verify/merkle.go new file mode 100644 index 00000000..3d8bc9b7 --- /dev/null +++ b/verify/merkle.go @@ -0,0 +1,33 @@ +package verify + +import ( + 
"errors" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// ProcessInclusionProof processes the Merkle proof +func ProcessInclusionProof(proof []byte, leaf common.Hash, index uint64) (common.Hash, error) { + if len(proof) == 0 || len(proof)%32 != 0 { + return common.Hash{}, errors.New("proof length should be a multiple of 32 bytes or 256 bits") + } + + computedHash := leaf + for i := 0; i < len(proof); i += 32 { + var proofElement common.Hash + copy(proofElement[:], proof[i:i+32]) + + var combined []byte + if index%2 == 0 { // right + combined = append(computedHash.Bytes(), proofElement.Bytes()...) + } else { // left + combined = append(proofElement.Bytes(), computedHash.Bytes()...) + } + + computedHash = crypto.Keccak256Hash(combined) + index = index / 2 + } + + return computedHash, nil +} diff --git a/verify/merkle_test.go b/verify/merkle_test.go new file mode 100644 index 00000000..2ac5e002 --- /dev/null +++ b/verify/merkle_test.go @@ -0,0 +1,41 @@ +package verify + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/require" +) + +func TestProcessInclusionProofPass(t *testing.T) { + proof, err := hexutil.Decode("0xc455c1ea0e725d7ea3e5f29e9f48be8fc2787bb0a914d5a86710ba302c166ac4f626d76f67f1055bb960a514fb8923af2078fd84085d712655b58a19612e8cd15c3e4ac1cef57acde3438dbcf63f47c9fefe1221344c4d5c1a4943dd0d1803091ca81a270909dc0e146841441c9bd0e08e69ce6168181a3e4060ffacf3627480bec6abdd8d7bb92b49d33f180c42f49e041752aaded9c403db3a17b85e48a11e9ea9a08763f7f383dab6d25236f1b77c12b4c49c5cdbcbea32554a604e3f1d2f466851cb43fe73617b3d01e665e4c019bf930f92dea7394c25ed6a1e200d051fb0c30a2193c459f1cfef00bf1ba6656510d16725a4d1dc031cb759dbc90bab427b0f60ddc6764681924dda848824605a4f08b7f526fe6bd4572458c94e83fbf2150f2eeb28d3011ec921996dc3e69efa52d5fcf3182b20b56b5857a926aa66605808079b4d52c0c0cfe06923fa92e65eeca2c3e6126108e8c1babf5ac522f4d7") + 
require.NoError(t, err) + + leaf := common.HexToHash("0xf6106e6ae4631e68abe0fa898cedbe97dbae6c7efb1b088c5aa2e8b91190ff96") + index := uint64(580) + + expectedRoot, err := hexutil.Decode("0x7390b8023db8248123dcaeca57fa6c9340bef639e204f2278fc7ec3d46ad071b") + require.NoError(t, err) + + actualRoot, err := ProcessInclusionProof(proof, leaf, index) + require.NoError(t, err) + + require.Equal(t, expectedRoot, actualRoot.Bytes()) +} + +func TestProcessInclusionProofFail(t *testing.T) { + proof, err := hexutil.Decode("0xc455c1ea0e725d7ea3e5f29e9f48be8fc2787bb0a914d5a86710ba302c166ac4f626d76f67f1055bb960a514fb8923af2078fd84085d712655b58a19612e8cd15c3e4ac1cef57acde3438dbcf63f47c9fefe1221344c4d5c1a4943dd0d1803091ca81a270909dc0e146841441c9bd0e08e69ce6168181a3e4060ffacf3627480bec6abdd8d7bb92b49d33f180c42f49e041752aaded9c403db3a17b85e48a11e9ea9a08763f7f383dab6d25236f1b77c12b4c49c5cdbcbea32554a604e3f1d2f466851cb43fe73617b3d01e665e4c019bf930f92dea7394c25ed6a1e200d051fb0c30a2193c459f1cfef00bf1ba6656510d16725a4d1dc031cb759dbc90bab427b0f60ddc6764681924dda848824605a4f08b7f526fe6bd4572458c94e83fbf2150f2eeb28d3011ec921996dc3e69efa52d5fcf3182b20b56b5857a926aa66605808079b4d52c0c0cfe06923fa92e65eeca2c3e6126108e8c1babf5ac522f4d7") + require.NoError(t, err) + + leaf := common.HexToHash("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") + index := uint64(580) + + expectedRoot, err := hexutil.Decode("0x7390b8023db8248123dcaeca57fa6c9340bef639e204f2278fc7ec3d46ad071b") + require.NoError(t, err) + + actualRoot, err := ProcessInclusionProof(proof, leaf, index) + require.NoError(t, err) + + require.NotEqual(t, expectedRoot, actualRoot.Bytes()) +} diff --git a/verify/verifier.go b/verify/verifier.go index 298db2b4..651d5eb7 100644 --- a/verify/verifier.go +++ b/verify/verifier.go @@ -7,27 +7,86 @@ import ( "github.com/Layr-Labs/eigenda/encoding" "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fp" + "github.com/ethereum/go-ethereum/log" 
+ + proxy_common "github.com/Layr-Labs/eigenda-proxy/common" + + binding "github.com/Layr-Labs/eigenda/contracts/bindings/EigenDAServiceManager" "github.com/Layr-Labs/eigenda/encoding/kzg" "github.com/Layr-Labs/eigenda/encoding/kzg/prover" "github.com/Layr-Labs/eigenda/encoding/rs" ) +type Config struct { + Verify bool + RPCURL string + SvcManagerAddr string + KzgConfig *kzg.KzgConfig +} + type Verifier struct { - prover *prover.Prover + verifyCert bool + prover *prover.Prover + cv *CertVerifier } -func NewVerifier(cfg *kzg.KzgConfig) (*Verifier, error) { - prover, err := prover.NewProver(cfg, false) // don't load G2 points +func NewVerifier(cfg *Config, l log.Logger) (*Verifier, error) { + var cv *CertVerifier + var err error + + if cfg.Verify { + cv, err = NewCertVerifier(cfg, l) + if err != nil { + return nil, err + } + } + + prover, err := prover.NewProver(cfg.KzgConfig, false) // don't load G2 points if err != nil { return nil, err } return &Verifier{ - prover: prover, + verifyCert: cfg.Verify, + prover: prover, + cv: cv, }, nil } +func (v *Verifier) VerifyCert(cert *proxy_common.Certificate) error { + if !v.verifyCert { + return nil + } + + // 1 - verify batch + header := binding.IEigenDAServiceManagerBatchHeader{ + BlobHeadersRoot: [32]byte(cert.Proof().GetBatchMetadata().GetBatchHeader().GetBatchRoot()), + QuorumNumbers: cert.Proof().GetBatchMetadata().GetBatchHeader().GetQuorumNumbers(), + ReferenceBlockNumber: cert.Proof().GetBatchMetadata().GetBatchHeader().GetReferenceBlockNumber(), + SignedStakeForQuorums: cert.Proof().GetBatchMetadata().GetBatchHeader().GetQuorumSignedPercentages(), + } + + err := v.cv.VerifyBatch(&header, cert.Proof().GetBatchId(), [32]byte(cert.Proof().BatchMetadata.GetSignatoryRecordHash()), cert.Proof().BatchMetadata.GetConfirmationBlockNumber()) + if err != nil { + return err + } + + // 2 - verify merkle inclusion proof + err = v.cv.VerifyMerkleProof(cert.Proof().GetInclusionProof(), cert.BatchHeaderRoot(), 
cert.Proof().GetBlobIndex(), cert.ReadBlobHeader()) + if err != nil { + return err + } + + // 3 - verify security parameters + err = v.VerifySecurityParams(cert.ReadBlobHeader(), header) + if err != nil { + return err + } + + return nil +} + func (v *Verifier) Commit(blob []byte) (*bn254.G1Affine, error) { // ChunkLength and TotalChunks aren't relevant for computing data // commitment which is why they're currently set arbitrarily @@ -54,7 +113,7 @@ func (v *Verifier) Commit(blob []byte) (*bn254.G1Affine, error) { // Verify regenerates a commitment from the blob and asserts equivalence // to the commitment in the certificate // TODO: Optimize implementation by opening a point on the commitment instead -func (v *Verifier) Verify(expectedCommit *common.G1Commitment, blob []byte) error { +func (v *Verifier) VerifyCommitment(expectedCommit *common.G1Commitment, blob []byte) error { actualCommit, err := v.Commit(blob) if err != nil { return err @@ -75,3 +134,64 @@ func (v *Verifier) Verify(expectedCommit *common.G1Commitment, blob []byte) erro return nil } + +// VerifySecurityParams ensures that returned security parameters are valid +func (v *Verifier) VerifySecurityParams(blobHeader proxy_common.BlobHeader, batchHeader binding.IEigenDAServiceManagerBatchHeader) error { + + confirmedQuorums := make(map[uint8]bool) + + // require that the security param in each blob is met + for i := 0; i < len(blobHeader.QuorumBlobParams); i++ { + if batchHeader.QuorumNumbers[i] != blobHeader.QuorumBlobParams[i].QuorumNumber { + return fmt.Errorf("quorum number mismatch, expected: %d, got: %d", batchHeader.QuorumNumbers[i], blobHeader.QuorumBlobParams[i].QuorumNumber) + } + + if blobHeader.QuorumBlobParams[i].AdversaryThresholdPercentage > blobHeader.QuorumBlobParams[i].ConfirmationThresholdPercentage { + return fmt.Errorf("adversary threshold percentage must be greater than or equal to confirmation threshold percentage") + } + + quorumAdversaryThreshold, err := 
v.getQuorumAdversaryThreshold(blobHeader.QuorumBlobParams[i].QuorumNumber) + if err != nil { + log.Warn("failed to get quorum adversary threshold", "err", err) + } + + if quorumAdversaryThreshold > 0 && blobHeader.QuorumBlobParams[i].AdversaryThresholdPercentage < quorumAdversaryThreshold { + return fmt.Errorf("adversary threshold percentage must be greater than or equal to quorum adversary threshold percentage") + } + + if batchHeader.SignedStakeForQuorums[i] < blobHeader.QuorumBlobParams[i].ConfirmationThresholdPercentage { + return fmt.Errorf("signed stake for quorum must be greater than or equal to confirmation threshold percentage") + } + + confirmedQuorums[blobHeader.QuorumBlobParams[i].QuorumNumber] = true + } + + requiredQuorums, err := v.cv.manager.QuorumNumbersRequired(nil) + if err != nil { + log.Warn("failed to get required quorum numbers", "err", err) + } + + // ensure that required quorums are present in the confirmed ones + for _, quorum := range requiredQuorums { + if !confirmedQuorums[quorum] { + return fmt.Errorf("quorum %d is required but not present in confirmed quorums", quorum) + } + } + + return nil +} + +// getQuorumAdversaryThreshold reads the adversarial threshold percentage for a given quorum number +// returns 0 if DNE +func (v *Verifier) getQuorumAdversaryThreshold(quorumNum uint8) (uint8, error) { + percentages, err := v.cv.manager.QuorumAdversaryThresholdPercentages(nil) + if err != nil { + return 0, err + } + + if len(percentages) > int(quorumNum) { + return percentages[quorumNum], nil + } + + return 0, nil +} diff --git a/verify/verify_test.go b/verify/verify_test.go index 19ec5c8b..0e7ccdb3 100644 --- a/verify/verify_test.go +++ b/verify/verify_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestVerification(t *testing.T) { +func TestCommitmentVerification(t *testing.T) { t.Parallel() var data = []byte("inter-subjective and not objective!") @@ -36,19 +36,24 @@ func TestVerification(t *testing.T) { 
NumWorker: uint64(runtime.GOMAXPROCS(0)), } - v, err := NewVerifier(kzgConfig) + cfg := &Config{ + Verify: false, + KzgConfig: kzgConfig, + } + + v, err := NewVerifier(cfg, nil) assert.NoError(t, err) // Happy path verification codec := codecs.NewIFFTCodec(codecs.NewDefaultBlobCodec()) blob, err := codec.EncodeBlob(data) assert.NoError(t, err) - err = v.Verify(c, blob) + err = v.VerifyCommitment(c, blob) assert.NoError(t, err) // failure with wrong data fakeData, err := codec.EncodeBlob([]byte("I am an imposter!!")) assert.NoError(t, err) - err = v.Verify(c, fakeData) + err = v.VerifyCommitment(c, fakeData) assert.Error(t, err) }